# Copyright (C) 2005 Canonical Ltd
# Johan Rydberg <jrydberg@gnu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

# TODO: might be nice to create a versionedfile with some type of corruption
# considered typical and check that it can be detected/corrected.
24
from itertools import chain
25
from StringIO import StringIO
33
from bzrlib.errors import (
35
RevisionAlreadyPresent,
38
from bzrlib import knit as _mod_knit
39
from bzrlib.knit import (
44
from bzrlib.symbol_versioning import one_four, one_five
45
from bzrlib.tests import TestCaseWithMemoryTransport, TestSkipped
46
from bzrlib.tests.http_utils import TestCaseWithWebserver
47
from bzrlib.trace import mutter
48
from bzrlib.transport import get_transport
49
from bzrlib.transport.memory import MemoryTransport
50
from bzrlib.tsort import topo_sort
51
from bzrlib.tuned_gzip import GzipFile
52
import bzrlib.versionedfile as versionedfile
53
from bzrlib.weave import WeaveFile
54
from bzrlib.weavefile import read_weave, write_weave
57
def get_diamond_vf(f, trailing_eol=True, left_only=False):
58
"""Get a diamond graph to exercise deltas and merges.
60
:param trailing_eol: If True end the last line with \n.
64
'base': (('origin',),),
66
'right': (('base',),),
67
'merged': (('left',), ('right',)),
69
# insert a diamond graph to exercise deltas and merges.
74
f.add_lines('origin', [], ['origin' + last_char])
75
f.add_lines('base', ['origin'], ['base' + last_char])
76
f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
78
f.add_lines('right', ['base'],
79
['base\n', 'right' + last_char])
80
f.add_lines('merged', ['left', 'right'],
81
['base\n', 'left\n', 'right\n', 'merged' + last_char])
85
class VersionedFileTestMixIn(object):
86
"""A mixin test class for testing VersionedFiles.
88
This is not an adaptor-style test at this point because
89
theres no dynamic substitution of versioned file implementations,
90
they are strictly controlled by their owning repositories.
93
def get_transaction(self):
    """Return the current transaction, lazily initialising it to None.

    The first call creates the ``_transaction`` attribute; later calls
    simply return whatever it has been set to.
    """
    try:
        return self._transaction
    except AttributeError:
        # First access: default the slot to None.
        self._transaction = None
        return self._transaction
100
f.add_lines('r0', [], ['a\n', 'b\n'])
101
f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
103
versions = f.versions()
104
self.assertTrue('r0' in versions)
105
self.assertTrue('r1' in versions)
106
self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
107
self.assertEquals(f.get_text('r0'), 'a\nb\n')
108
self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
109
self.assertEqual(2, len(f))
110
self.assertEqual(2, f.num_versions())
112
self.assertRaises(RevisionNotPresent,
113
f.add_lines, 'r2', ['foo'], [])
114
self.assertRaises(RevisionAlreadyPresent,
115
f.add_lines, 'r1', [], [])
117
# this checks that reopen with create=True does not break anything.
118
f = self.reopen_file(create=True)
121
def test_get_record_stream_empty(self):
122
"""get_record_stream is a replacement for get_data_stream."""
124
entries = f.get_record_stream([], 'unordered', False)
125
self.assertEqual([], list(entries))
127
def assertValidStorageKind(self, storage_kind):
    """Assert that storage_kind is one of the known storage kinds."""
    # Every storage kind a record stream may legitimately report.
    known_kinds = [
        'mpdiff',
        'knit-annotated-ft',
        'knit-annotated-delta',
        'knit-ft',
        'knit-delta',
        'fulltext',
        'knit-annotated-ft-gz',
        'knit-annotated-delta-gz',
        'knit-ft-gz',
        'knit-delta-gz',
        ]
    self.assertSubset([storage_kind], known_kinds)
134
def capture_stream(self, f, entries, on_seen, parents):
135
"""Capture a stream for testing."""
136
for factory in entries:
138
self.assertValidStorageKind(factory.storage_kind)
139
self.assertEqual(f.get_sha1s([factory.key[0]])[0], factory.sha1)
140
self.assertEqual(parents[factory.key[0]], factory.parents)
141
self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
144
def test_get_record_stream_interface(self):
145
"""Each item in a stream has to provide a regular interface."""
146
f, parents = get_diamond_vf(self.get_file())
147
entries = f.get_record_stream(['merged', 'left', 'right', 'base'],
150
self.capture_stream(f, entries, seen.add, parents)
151
self.assertEqual(set([('base',), ('left',), ('right',), ('merged',)]),
154
def test_get_record_stream_interface_ordered(self):
155
"""Each item in a stream has to provide a regular interface."""
156
f, parents = get_diamond_vf(self.get_file())
157
entries = f.get_record_stream(['merged', 'left', 'right', 'base'],
158
'topological', False)
160
self.capture_stream(f, entries, seen.append, parents)
161
self.assertSubset([tuple(seen)],
163
(('base',), ('left',), ('right',), ('merged',)),
164
(('base',), ('right',), ('left',), ('merged',)),
167
def test_get_record_stream_interface_ordered_with_delta_closure(self):
168
"""Each item in a stream has to provide a regular interface."""
169
f, parents = get_diamond_vf(self.get_file())
170
entries = f.get_record_stream(['merged', 'left', 'right', 'base'],
173
for factory in entries:
174
seen.append(factory.key)
175
self.assertValidStorageKind(factory.storage_kind)
176
self.assertEqual(f.get_sha1s([factory.key[0]])[0], factory.sha1)
177
self.assertEqual(parents[factory.key[0]], factory.parents)
178
self.assertEqual(f.get_text(factory.key[0]),
179
factory.get_bytes_as('fulltext'))
180
self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
182
self.assertSubset([tuple(seen)],
184
(('base',), ('left',), ('right',), ('merged',)),
185
(('base',), ('right',), ('left',), ('merged',)),
188
def test_get_record_stream_unknown_storage_kind_raises(self):
189
"""Asking for a storage kind that the stream cannot supply raises."""
190
f, parents = get_diamond_vf(self.get_file())
191
entries = f.get_record_stream(['merged', 'left', 'right', 'base'],
193
# We track the contents because we should be able to try, fail a
194
# particular kind and then ask for one that works and continue.
196
for factory in entries:
197
seen.add(factory.key)
198
self.assertValidStorageKind(factory.storage_kind)
199
self.assertEqual(f.get_sha1s([factory.key[0]])[0], factory.sha1)
200
self.assertEqual(parents[factory.key[0]], factory.parents)
201
# currently no stream emits mpdiff
202
self.assertRaises(errors.UnavailableRepresentation,
203
factory.get_bytes_as, 'mpdiff')
204
self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
206
self.assertEqual(set([('base',), ('left',), ('right',), ('merged',)]),
209
def test_get_record_stream_missing_records_are_absent(self):
210
f, parents = get_diamond_vf(self.get_file())
211
entries = f.get_record_stream(['merged', 'left', 'right', 'or', 'base'],
213
self.assertAbsentRecord(f, parents, entries)
214
entries = f.get_record_stream(['merged', 'left', 'right', 'or', 'base'],
215
'topological', False)
216
self.assertAbsentRecord(f, parents, entries)
218
def assertAbsentRecord(self, f, parents, entries):
219
"""Helper for test_get_record_stream_missing_records_are_absent."""
221
for factory in entries:
222
seen.add(factory.key)
223
if factory.key == ('or',):
224
self.assertEqual('absent', factory.storage_kind)
225
self.assertEqual(None, factory.sha1)
226
self.assertEqual(None, factory.parents)
228
self.assertValidStorageKind(factory.storage_kind)
229
self.assertEqual(f.get_sha1s([factory.key[0]])[0], factory.sha1)
230
self.assertEqual(parents[factory.key[0]], factory.parents)
231
self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
234
set([('base',), ('left',), ('right',), ('merged',), ('or',)]),
237
def test_filter_absent_records(self):
238
"""Requested missing records can be filter trivially."""
239
f, parents = get_diamond_vf(self.get_file())
240
entries = f.get_record_stream(['merged', 'left', 'right', 'extra', 'base'],
243
self.capture_stream(f, versionedfile.filter_absent(entries), seen.add,
245
self.assertEqual(set([('base',), ('left',), ('right',), ('merged',)]),
248
def test_insert_record_stream_empty(self):
249
"""Inserting an empty record stream should work."""
252
f.insert_record_stream([])
254
def assertIdenticalVersionedFile(self, left, right):
    """Assert that left and right have the same contents.

    Checks the version sets, the parent graph, and every version's text.
    """
    self.assertEqual(set(left.versions()), set(right.versions()))
    # Parent maps must agree across the whole graph.
    left_parents = left.get_parent_map(left.versions())
    right_parents = right.get_parent_map(right.versions())
    self.assertEqual(left_parents, right_parents)
    # And each stored text must be byte-identical.
    for version in left.versions():
        self.assertEqual(left.get_text(version), right.get_text(version))
262
def test_insert_record_stream_fulltexts(self):
263
"""Any file should accept a stream of fulltexts."""
265
weave_vf = WeaveFile('source', get_transport(self.get_url('.')),
266
create=True, get_scope=self.get_transaction)
267
source, _ = get_diamond_vf(weave_vf)
268
stream = source.get_record_stream(source.versions(), 'topological',
270
f.insert_record_stream(stream)
271
self.assertIdenticalVersionedFile(f, source)
273
def test_insert_record_stream_fulltexts_noeol(self):
274
"""Any file should accept a stream of fulltexts."""
276
weave_vf = WeaveFile('source', get_transport(self.get_url('.')),
277
create=True, get_scope=self.get_transaction)
278
source, _ = get_diamond_vf(weave_vf, trailing_eol=False)
279
stream = source.get_record_stream(source.versions(), 'topological',
281
f.insert_record_stream(stream)
282
self.assertIdenticalVersionedFile(f, source)
284
def test_insert_record_stream_annotated_knits(self):
285
"""Any file should accept a stream from plain knits."""
287
source = make_file_knit('source', get_transport(self.get_url('.')),
289
get_diamond_vf(source)
290
stream = source.get_record_stream(source.versions(), 'topological',
292
f.insert_record_stream(stream)
293
self.assertIdenticalVersionedFile(f, source)
295
def test_insert_record_stream_annotated_knits_noeol(self):
296
"""Any file should accept a stream from plain knits."""
298
source = make_file_knit('source', get_transport(self.get_url('.')),
300
get_diamond_vf(source, trailing_eol=False)
301
stream = source.get_record_stream(source.versions(), 'topological',
303
f.insert_record_stream(stream)
304
self.assertIdenticalVersionedFile(f, source)
306
def test_insert_record_stream_plain_knits(self):
307
"""Any file should accept a stream from plain knits."""
309
source = make_file_knit('source', get_transport(self.get_url('.')),
310
create=True, factory=KnitPlainFactory())
311
get_diamond_vf(source)
312
stream = source.get_record_stream(source.versions(), 'topological',
314
f.insert_record_stream(stream)
315
self.assertIdenticalVersionedFile(f, source)
317
def test_insert_record_stream_plain_knits_noeol(self):
318
"""Any file should accept a stream from plain knits."""
320
source = make_file_knit('source', get_transport(self.get_url('.')),
321
create=True, factory=KnitPlainFactory())
322
get_diamond_vf(source, trailing_eol=False)
323
stream = source.get_record_stream(source.versions(), 'topological',
325
f.insert_record_stream(stream)
326
self.assertIdenticalVersionedFile(f, source)
328
def test_insert_record_stream_existing_keys(self):
329
"""Inserting keys already in a file should not error."""
331
source = make_file_knit('source', get_transport(self.get_url('.')),
332
create=True, factory=KnitPlainFactory())
333
get_diamond_vf(source)
334
# insert some keys into f.
335
get_diamond_vf(f, left_only=True)
336
stream = source.get_record_stream(source.versions(), 'topological',
338
f.insert_record_stream(stream)
339
self.assertIdenticalVersionedFile(f, source)
341
def test_insert_record_stream_missing_keys(self):
342
"""Inserting a stream with absent keys should raise an error."""
344
source = make_file_knit('source', get_transport(self.get_url('.')),
345
create=True, factory=KnitPlainFactory())
346
stream = source.get_record_stream(['missing'], 'topological',
348
self.assertRaises(errors.RevisionNotPresent, f.insert_record_stream,
351
def test_insert_record_stream_out_of_order(self):
352
"""An out of order stream can either error or work."""
353
f, parents = get_diamond_vf(self.get_file())
354
origin_entries = f.get_record_stream(['origin'], 'unordered', False)
355
end_entries = f.get_record_stream(['merged', 'left'],
356
'topological', False)
357
start_entries = f.get_record_stream(['right', 'base'],
358
'topological', False)
359
entries = chain(origin_entries, end_entries, start_entries)
360
target = self.get_file('target')
362
target.insert_record_stream(entries)
363
except RevisionNotPresent:
364
# Must not have corrupted the file.
367
self.assertIdenticalVersionedFile(f, target)
369
def test_insert_record_stream_delta_missing_basis_no_corruption(self):
370
"""Insertion where a needed basis is not included aborts safely."""
371
# Annotated source - deltas can be used in any knit.
372
source = make_file_knit('source', get_transport(self.get_url('.')),
374
get_diamond_vf(source)
375
entries = source.get_record_stream(['origin', 'merged'], 'unordered', False)
377
self.assertRaises(RevisionNotPresent, f.insert_record_stream, entries)
379
self.assertFalse(f.has_version('merged'))
381
def test_adds_with_parent_texts(self):
384
_, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
386
_, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
387
['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
388
except NotImplementedError:
389
# if the format doesn't support ghosts, just add normally.
390
_, _, parent_texts['r1'] = f.add_lines('r1',
391
['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
392
f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
393
self.assertNotEqual(None, parent_texts['r0'])
394
self.assertNotEqual(None, parent_texts['r1'])
396
versions = f.versions()
397
self.assertTrue('r0' in versions)
398
self.assertTrue('r1' in versions)
399
self.assertTrue('r2' in versions)
400
self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
401
self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
402
self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
403
self.assertEqual(3, f.num_versions())
404
origins = f.annotate('r1')
405
self.assertEquals(origins[0][0], 'r0')
406
self.assertEquals(origins[1][0], 'r1')
407
origins = f.annotate('r2')
408
self.assertEquals(origins[0][0], 'r1')
409
self.assertEquals(origins[1][0], 'r2')
412
f = self.reopen_file()
415
def test_add_unicode_content(self):
416
# unicode content is not permitted in versioned files.
417
# versioned files version sequences of bytes only.
419
self.assertRaises(errors.BzrBadParameterUnicode,
420
vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
422
(errors.BzrBadParameterUnicode, NotImplementedError),
423
vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])
425
def test_add_follows_left_matching_blocks(self):
426
"""If we change left_matching_blocks, delta changes
428
Note: There are multiple correct deltas in this case, because
429
we start with 1 "a" and we get 3.
432
if isinstance(vf, WeaveFile):
433
raise TestSkipped("WeaveFile ignores left_matching_blocks")
434
vf.add_lines('1', [], ['a\n'])
435
vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
436
left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
437
self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
438
vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
439
left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
440
self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))
442
def test_inline_newline_throws(self):
443
# \r characters are not permitted in lines being added
445
self.assertRaises(errors.BzrBadParameterContainsNewline,
446
vf.add_lines, 'a', [], ['a\n\n'])
448
(errors.BzrBadParameterContainsNewline, NotImplementedError),
449
vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
450
# but inline CR's are allowed
451
vf.add_lines('a', [], ['a\r\n'])
453
vf.add_lines_with_ghosts('b', [], ['a\r\n'])
454
except NotImplementedError:
457
def test_add_reserved(self):
459
self.assertRaises(errors.ReservedId,
460
vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])
462
def test_add_lines_nostoresha(self):
463
"""When nostore_sha is supplied using old content raises."""
465
empty_text = ('a', [])
466
sample_text_nl = ('b', ["foo\n", "bar\n"])
467
sample_text_no_nl = ('c', ["foo\n", "bar"])
469
for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
470
sha, _, _ = vf.add_lines(version, [], lines)
472
# we now have a copy of all the lines in the vf.
473
for sha, (version, lines) in zip(
474
shas, (empty_text, sample_text_nl, sample_text_no_nl)):
475
self.assertRaises(errors.ExistingContent,
476
vf.add_lines, version + "2", [], lines,
478
# and no new version should have been added.
479
self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
482
def test_add_lines_with_ghosts_nostoresha(self):
483
"""When nostore_sha is supplied using old content raises."""
485
empty_text = ('a', [])
486
sample_text_nl = ('b', ["foo\n", "bar\n"])
487
sample_text_no_nl = ('c', ["foo\n", "bar"])
489
for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
490
sha, _, _ = vf.add_lines(version, [], lines)
492
# we now have a copy of all the lines in the vf.
493
# is the test applicable to this vf implementation?
495
vf.add_lines_with_ghosts('d', [], [])
496
except NotImplementedError:
497
raise TestSkipped("add_lines_with_ghosts is optional")
498
for sha, (version, lines) in zip(
499
shas, (empty_text, sample_text_nl, sample_text_no_nl)):
500
self.assertRaises(errors.ExistingContent,
501
vf.add_lines_with_ghosts, version + "2", [], lines,
503
# and no new version should have been added.
504
self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
507
def test_add_lines_return_value(self):
508
# add_lines should return the sha1 and the text size.
510
empty_text = ('a', [])
511
sample_text_nl = ('b', ["foo\n", "bar\n"])
512
sample_text_no_nl = ('c', ["foo\n", "bar"])
513
# check results for the three cases:
514
for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
515
# the first two elements are the same for all versioned files:
516
# - the digest and the size of the text. For some versioned files
517
# additional data is returned in additional tuple elements.
518
result = vf.add_lines(version, [], lines)
519
self.assertEqual(3, len(result))
520
self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
522
# parents should not affect the result:
523
lines = sample_text_nl[1]
524
self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
525
vf.add_lines('d', ['b', 'c'], lines)[0:2])
527
def test_get_reserved(self):
529
self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
530
self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
531
self.assertRaises(errors.ReservedId, vf.get_text, 'b:')
533
def test_add_unchanged_last_line_noeol_snapshot(self):
534
"""Add a text with an unchanged last line with no eol should work."""
535
# Test adding this in a number of chain lengths; because the interface
536
# for VersionedFile does not allow forcing a specific chain length, we
537
# just use a small base to get the first snapshot, then a much longer
538
# first line for the next add (which will make the third add snapshot)
539
# and so on. 20 has been chosen as an aribtrary figure - knits use 200
540
# as a capped delta length, but ideally we would have some way of
541
# tuning the test to the store (e.g. keep going until a snapshot
543
for length in range(20):
545
vf = self.get_file('case-%d' % length)
548
for step in range(length):
549
version = prefix % step
550
lines = (['prelude \n'] * step) + ['line']
551
vf.add_lines(version, parents, lines)
552
version_lines[version] = lines
554
vf.add_lines('no-eol', parents, ['line'])
555
vf.get_texts(version_lines.keys())
556
self.assertEqualDiff('line', vf.get_text('no-eol'))
558
def test_get_texts_eol_variation(self):
559
# similar to the failure in <http://bugs.launchpad.net/234748>
561
sample_text_nl = ["line\n"]
562
sample_text_no_nl = ["line"]
569
lines = sample_text_nl
571
lines = sample_text_no_nl
572
# left_matching blocks is an internal api; it operates on the
573
# *internal* representation for a knit, which is with *all* lines
574
# being normalised to end with \n - even the final line in a no_nl
575
# file. Using it here ensures that a broken internal implementation
576
# (which is what this test tests) will generate a correct line
577
# delta (which is to say, an empty delta).
578
vf.add_lines(version, parents, lines,
579
left_matching_blocks=[(0, 0, 1)])
581
versions.append(version)
582
version_lines[version] = lines
584
vf.get_texts(versions)
585
vf.get_texts(reversed(versions))
587
def test_add_lines_with_matching_blocks_noeol_last_line(self):
588
"""Add a text with an unchanged last line with no eol should work."""
589
from bzrlib import multiparent
590
# Hand verified sha1 of the text we're adding.
591
sha1 = '6a1d115ec7b60afb664dc14890b5af5ce3c827a4'
592
# Create a mpdiff which adds a new line before the trailing line, and
593
# reuse the last line unaltered (which can cause annotation reuse).
594
# Test adding this in two situations:
595
# On top of a new insertion
596
vf = self.get_file('fulltext')
597
vf.add_lines('noeol', [], ['line'])
598
vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
599
left_matching_blocks=[(0, 1, 1)])
600
self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
602
vf = self.get_file('delta')
603
vf.add_lines('base', [], ['line'])
604
vf.add_lines('noeol', ['base'], ['prelude\n', 'line'])
605
vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
606
left_matching_blocks=[(1, 1, 1)])
607
self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
609
def test_make_mpdiffs(self):
    """Round-trip every version through make_mpdiffs/add_mpdiffs.

    Each version extracted as a multi-parent diff from one file and
    re-added to a fresh file must reproduce the identical text.
    """
    from bzrlib import multiparent
    source_vf = self.get_file('foo')
    sha1s = self._setup_for_deltas(source_vf)
    target_vf = self.get_file('bar')
    # Walk parents-before-children so each diff's basis already exists
    # in the target.
    for version in multiparent.topo_iter(source_vf):
        parents = source_vf.get_parent_map([version])[version]
        expected_sha1 = source_vf.get_sha1s([version])[0]
        mpdiff = source_vf.make_mpdiffs([version])[0]
        target_vf.add_mpdiffs([(version, parents, expected_sha1, mpdiff)])
        self.assertEqualDiff(source_vf.get_text(version),
                             target_vf.get_text(version))
621
def _setup_for_deltas(self, f):
622
self.assertFalse(f.has_version('base'))
623
# add texts that should trip the knit maximum delta chain threshold
624
# as well as doing parallel chains of data in knits.
625
# this is done by two chains of 25 insertions
626
f.add_lines('base', [], ['line\n'])
627
f.add_lines('noeol', ['base'], ['line'])
628
# detailed eol tests:
629
# shared last line with parent no-eol
630
f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
631
# differing last line with parent, both no-eol
632
f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
633
# add eol following a noneol parent, change content
634
f.add_lines('eol', ['noeol'], ['phone\n'])
635
# add eol following a noneol parent, no change content
636
f.add_lines('eolline', ['noeol'], ['line\n'])
637
# noeol with no parents:
638
f.add_lines('noeolbase', [], ['line'])
639
# noeol preceeding its leftmost parent in the output:
640
# this is done by making it a merge of two parents with no common
641
# anestry: noeolbase and noeol with the
642
# later-inserted parent the leftmost.
643
f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
644
# two identical eol texts
645
f.add_lines('noeoldup', ['noeol'], ['line'])
647
text_name = 'chain1-'
649
sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
650
1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
651
2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
652
3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
653
4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
654
5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
655
6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
656
7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
657
8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
658
9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
659
10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
660
11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
661
12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
662
13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
663
14:'2c4b1736566b8ca6051e668de68650686a3922f2',
664
15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
665
16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
666
17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
667
18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
668
19:'1ebed371807ba5935958ad0884595126e8c4e823',
669
20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
670
21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
671
22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
672
23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
673
24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
674
25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
676
for depth in range(26):
677
new_version = text_name + '%s' % depth
678
text = text + ['line\n']
679
f.add_lines(new_version, [next_parent], text)
680
next_parent = new_version
682
text_name = 'chain2-'
684
for depth in range(26):
685
new_version = text_name + '%s' % depth
686
text = text + ['line\n']
687
f.add_lines(new_version, [next_parent], text)
688
next_parent = new_version
691
def test_ancestry(self):
693
self.assertEqual([], f.get_ancestry([]))
694
f.add_lines('r0', [], ['a\n', 'b\n'])
695
f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
696
f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
697
f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
698
f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
699
self.assertEqual([], f.get_ancestry([]))
700
versions = f.get_ancestry(['rM'])
701
# there are some possibilities:
705
# so we check indexes
706
r0 = versions.index('r0')
707
r1 = versions.index('r1')
708
r2 = versions.index('r2')
709
self.assertFalse('r3' in versions)
710
rM = versions.index('rM')
711
self.assertTrue(r0 < r1)
712
self.assertTrue(r0 < r2)
713
self.assertTrue(r1 < rM)
714
self.assertTrue(r2 < rM)
716
self.assertRaises(RevisionNotPresent,
717
f.get_ancestry, ['rM', 'rX'])
719
self.assertEqual(set(f.get_ancestry('rM')),
720
set(f.get_ancestry('rM', topo_sorted=False)))
722
def test_mutate_after_finish(self):
723
self._transaction = 'before'
725
self._transaction = 'after'
726
self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
727
self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])
728
self.assertRaises(errors.OutSideTransaction, self.applyDeprecated,
729
one_five, f.join, '')
731
def test_copy_to(self):
733
f.add_lines('0', [], ['a\n'])
734
t = MemoryTransport()
736
for suffix in self.get_factory().get_suffixes():
737
self.assertTrue(t.has('foo' + suffix))
739
def test_get_suffixes(self):
741
# and should be a list
742
self.assertTrue(isinstance(self.get_factory().get_suffixes(), list))
744
def test_get_parent_map(self):
746
f.add_lines('r0', [], ['a\n', 'b\n'])
748
{'r0':()}, f.get_parent_map(['r0']))
749
f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
751
{'r1':('r0',)}, f.get_parent_map(['r1']))
755
f.get_parent_map(['r0', 'r1']))
756
f.add_lines('r2', [], ['a\n', 'b\n'])
757
f.add_lines('r3', [], ['a\n', 'b\n'])
758
f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
760
{'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
761
self.assertEqual({}, f.get_parent_map('y'))
765
f.get_parent_map(['r0', 'y', 'r1']))
767
def test_annotate(self):
769
f.add_lines('r0', [], ['a\n', 'b\n'])
770
f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
771
origins = f.annotate('r1')
772
self.assertEquals(origins[0][0], 'r1')
773
self.assertEquals(origins[1][0], 'r0')
775
self.assertRaises(RevisionNotPresent,
778
def test_detection(self):
779
# Test weaves detect corruption.
781
# Weaves contain a checksum of their texts.
782
# When a text is extracted, this checksum should be
785
w = self.get_file_corrupted_text()
787
self.assertEqual('hello\n', w.get_text('v1'))
788
self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
789
self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
790
self.assertRaises(errors.WeaveInvalidChecksum, w.check)
792
w = self.get_file_corrupted_checksum()
794
self.assertEqual('hello\n', w.get_text('v1'))
795
self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
796
self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
797
self.assertRaises(errors.WeaveInvalidChecksum, w.check)
799
def get_file_corrupted_text(self):
    """Return a versioned file with corrupt text but valid metadata.

    Always raises NotImplementedError here: concrete test classes must
    override this to build a corrupted fixture for their implementation.
    """
    raise NotImplementedError(self.get_file_corrupted_text)
803
def reopen_file(self, name='foo'):
    """Open the versioned file from disk again.

    :param name: Name of the versioned file to reopen.
    :raises NotImplementedError: always; concrete test classes override
        this to reopen their particular versioned file implementation.
    """
    raise NotImplementedError(self.reopen_file)
807
def test_iter_lines_added_or_present_in_versions(self):
808
# test that we get at least an equalset of the lines added by
809
# versions in the weave
810
# the ordering here is to make a tree so that dumb searches have
811
# more changes to muck up.
813
class InstrumentedProgress(progress.DummyProgress):
817
progress.DummyProgress.__init__(self)
820
def update(self, msg=None, current=None, total=None):
821
self.updates.append((msg, current, total))
824
# add a base to get included
825
vf.add_lines('base', [], ['base\n'])
826
# add a ancestor to be included on one side
827
vf.add_lines('lancestor', [], ['lancestor\n'])
828
# add a ancestor to be included on the other side
829
vf.add_lines('rancestor', ['base'], ['rancestor\n'])
830
# add a child of rancestor with no eofile-nl
831
vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
832
# add a child of lancestor and base to join the two roots
833
vf.add_lines('otherchild',
834
['lancestor', 'base'],
835
['base\n', 'lancestor\n', 'otherchild\n'])
836
def iter_with_versions(versions, expected):
837
# now we need to see what lines are returned, and how often.
839
progress = InstrumentedProgress()
840
# iterate over the lines
841
for line in vf.iter_lines_added_or_present_in_versions(versions,
843
lines.setdefault(line, 0)
845
if []!= progress.updates:
846
self.assertEqual(expected, progress.updates)
848
lines = iter_with_versions(['child', 'otherchild'],
849
[('Walking content.', 0, 2),
850
('Walking content.', 1, 2),
851
('Walking content.', 2, 2)])
852
# we must see child and otherchild
853
self.assertTrue(lines[('child\n', 'child')] > 0)
854
self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
855
# we dont care if we got more than that.
858
lines = iter_with_versions(None, [('Walking content.', 0, 5),
859
('Walking content.', 1, 5),
860
('Walking content.', 2, 5),
861
('Walking content.', 3, 5),
862
('Walking content.', 4, 5),
863
('Walking content.', 5, 5)])
864
# all lines must be seen at least once
865
self.assertTrue(lines[('base\n', 'base')] > 0)
866
self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
867
self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
868
self.assertTrue(lines[('child\n', 'child')] > 0)
869
self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
871
def test_add_lines_with_ghosts(self):
    """Ghost-aware apis record absent parents; others must not see them.

    Formats without ghost support must raise NotImplementedError from the
    ``*_with_ghosts`` apis instead.
    """
    # NOTE(review): garbling dropped the ``vf = self.get_file()`` setup and
    # the try/except scaffolding around the probe call; restored to match
    # the visible except-clause and the sibling ghost test below.
    vf = self.get_file()
    # add a revision with ghost parents
    # The preferred form is utf8, but we should translate when needed
    parent_id_unicode = u'b\xbfse'
    parent_id_utf8 = parent_id_unicode.encode('utf8')
    try:
        vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
    except NotImplementedError:
        # check the other ghost apis are also not implemented
        self.assertRaises(NotImplementedError,
            vf.get_ancestry_with_ghosts, ['foo'])
        self.assertRaises(NotImplementedError,
            vf.get_parents_with_ghosts, 'foo')
        return
    vf = self.reopen_file()
    # test key graph related apis: get_ancestry, _graph, get_parents
    # - these are ghost unaware and must not reflect ghosts
    self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
    self.assertFalse(vf.has_version(parent_id_utf8))
    # we have _with_ghost apis to give us ghost information.
    self.assertEqual([parent_id_utf8, 'notbxbfse'],
        vf.get_ancestry_with_ghosts(['notbxbfse']))
    self.assertEqual([parent_id_utf8],
        vf.get_parents_with_ghosts('notbxbfse'))
    # if we add something that is a ghost of another, it should correct the
    # results of the prior apis
    vf.add_lines(parent_id_utf8, [], [])
    self.assertEqual([parent_id_utf8, 'notbxbfse'],
        vf.get_ancestry(['notbxbfse']))
    self.assertEqual({'notbxbfse':(parent_id_utf8,)},
        vf.get_parent_map(['notbxbfse']))
    self.assertTrue(vf.has_version(parent_id_utf8))
    # we have _with_ghost apis to give us ghost information.
    self.assertEqual([parent_id_utf8, 'notbxbfse'],
        vf.get_ancestry_with_ghosts(['notbxbfse']))
    self.assertEqual([parent_id_utf8],
        vf.get_parents_with_ghosts('notbxbfse'))
909
def test_add_lines_with_ghosts_after_normal_revs(self):
    """A ghost parent after normal revisions must not disturb annotation."""
    # some versioned file formats allow lines to be added with parent
    # information that is > than that in the format. Formats that do
    # not support this need to raise NotImplementedError on the
    # add_lines_with_ghosts api.
    vf = self.get_file()
    # probe for ghost support
    try:
        vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
    except NotImplementedError:
        return
    # NOTE(review): the parent list for 'references_ghost' was lost in
    # extraction; restored as ['base', 'a_ghost'] — confirm upstream.
    vf.add_lines_with_ghosts('references_ghost',
        ['base', 'a_ghost'],
        ['line\n', 'line_b\n', 'line_c\n'])
    origins = vf.annotate('references_ghost')
    self.assertEquals(('base', 'line\n'), origins[0])
    self.assertEquals(('base', 'line_b\n'), origins[1])
    self.assertEquals(('references_ghost', 'line_c\n'), origins[2])
928
def test_readonly_mode(self):
929
transport = get_transport(self.get_url('.'))
930
factory = self.get_factory()
931
vf = factory('id', transport, 0777, create=True, access_mode='w')
932
vf = factory('id', transport, access_mode='r')
933
self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
934
self.assertRaises(errors.ReadOnlyError,
935
vf.add_lines_with_ghosts,
939
self.assertRaises(errors.ReadOnlyError, self.applyDeprecated, one_five,
942
def test_get_sha1s(self):
    """get_sha1s returns content sha1s in the requested order."""
    # check the sha1 data is available
    vf = self.get_file()
    # a simple file
    vf.add_lines('a', [], ['a\n'])
    # the same file, different metadata
    vf.add_lines('b', ['a'], ['a\n'])
    # a file differing only in last newline.
    vf.add_lines('c', [], ['a'])
    # 3f78... is sha1('a\n'); 86f7... is sha1('a') — the request order is
    # ['a', 'c', 'b'] so the middle entry is the no-newline text.
    self.assertEqual(['3f786850e387550fdab836ed7e6dc881de23001b',
                      '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
                      '3f786850e387550fdab836ed7e6dc881de23001b'],
                      vf.get_sha1s(['a', 'c', 'b']))
957
class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):
    """Run the shared VersionedFile tests against WeaveFile."""

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_file_corrupted_text(self):
        """Return a weave whose stored text has been invasively corrupted."""
        w = WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)
        w.add_lines('v1', [], ['hello\n'])
        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
        # We are going to invasively corrupt the text
        # Make sure the internals of weave are the same
        # NOTE(review): the interior of these two expected lists was lost in
        # extraction; restored from the visible anchors — confirm upstream.
        self.assertEqual([('{', 0)
                        , 'hello\n'
                        , ('}', None)
                        , ('{', 1)
                        , 'there\n'
                        , ('}', None)
                        ], w._weave)
        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
                        , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
                        ], w._sha1s)
        w.check()
        # Corrupted
        w._weave[4] = 'There\n'
        return w

    def get_file_corrupted_checksum(self):
        """Return a weave whose recorded sha1 no longer matches its text."""
        w = self.get_file_corrupted_text()
        # Corrected
        w._weave[4] = 'there\n'
        self.assertEqual('hello\nthere\n', w.get_text('v2'))
        # Invalid checksum, first digit changed
        w._sha1s[1] = 'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
        return w

    def reopen_file(self, name='foo', create=False):
        return WeaveFile(name, get_transport(self.get_url('.')), create=create,
            get_scope=self.get_transaction)

    def test_no_implicit_create(self):
        # opening a missing weave without create=True must fail, not create.
        self.assertRaises(errors.NoSuchFile,
                          WeaveFile,
                          'foo',
                          get_transport(self.get_url('.')),
                          get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile
1013
class TestKnit(TestCaseWithMemoryTransport, VersionedFileTestMixIn):
    """Run the shared VersionedFile tests against annotated file knits."""

    def get_file(self, name='foo', create=True):
        # Fix: 'create' was hard-coded to True, so reopen_file(name,
        # create=False) silently re-created the file instead of opening it.
        # TestPlaintextKnit.get_file already passes create=create.
        return make_file_knit(name, get_transport(self.get_url('.')),
            delta=True, create=create, get_scope=self.get_transaction)

    def get_factory(self):
        return make_file_knit

    def get_file_corrupted_text(self):
        knit = self.get_file()
        knit.add_lines('v1', [], ['hello\n'])
        knit.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
        return knit

    def reopen_file(self, name='foo', create=False):
        return self.get_file(name, create)

    def test_detection(self):
        # NOTE(review): the body after this setup line was lost in
        # extraction; restored as a bare integrity check — confirm upstream.
        knit = self.get_file()
        knit.check()

    def test_no_implicit_create(self):
        # opening a missing knit without create must fail, not create.
        self.assertRaises(errors.NoSuchFile, self.get_factory(), 'foo',
                          get_transport(self.get_url('.')))
1040
class TestPlaintextKnit(TestKnit):
    """Run the knit tests against a knit that caches no annotations."""

    def get_file(self, name='foo', create=True):
        transport = get_transport(self.get_url('.'))
        plain_factory = _mod_knit.KnitPlainFactory()
        return make_file_knit(name, transport, delta=True, create=create,
            get_scope=self.get_transaction, factory=plain_factory)
1049
class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):
    """Tests for _PlanMergeVersionedFile layered over two knits."""

    def setUp(self):
        TestCaseWithMemoryTransport.setUp(self)
        self.vf1 = make_file_knit('root', self.get_transport(), create=True)
        self.vf2 = make_file_knit('root', self.get_transport(), create=True)
        self.plan_merge_vf = versionedfile._PlanMergeVersionedFile('root',
            [self.vf1, self.vf2])

    def test_add_lines(self):
        # only ':'-suffixed ids, with list parents and list lines, are legal.
        self.plan_merge_vf.add_lines('a:', [], [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines, 'a', [],
            [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines, 'a:', None,
            [])
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines, 'a:', [],
            None)

    def test_ancestry(self):
        self.vf1.add_lines('A', [], [])
        self.vf1.add_lines('B', ['A'], [])
        self.plan_merge_vf.add_lines('C:', ['B'], [])
        self.plan_merge_vf.add_lines('D:', ['C:'], [])
        self.assertEqual(set(['A', 'B', 'C:', 'D:']),
            self.plan_merge_vf.get_ancestry('D:', topo_sorted=False))

    def setup_abcde(self):
        """A/B in vf1, C/D in vf2, E: merging B and D in the plan-merge vf."""
        self.vf1.add_lines('A', [], ['a'])
        self.vf1.add_lines('B', ['A'], ['b'])
        self.vf2.add_lines('C', [], ['c'])
        self.vf2.add_lines('D', ['C'], ['d'])
        self.plan_merge_vf.add_lines('E:', ['B', 'D'], ['e'])

    def test_ancestry_uses_all_versionedfiles(self):
        self.setup_abcde()
        self.assertEqual(set(['A', 'B', 'C', 'D', 'E:']),
            self.plan_merge_vf.get_ancestry('E:', topo_sorted=False))

    def test_ancestry_raises_revision_not_present(self):
        error = self.assertRaises(errors.RevisionNotPresent,
            self.plan_merge_vf.get_ancestry, 'E:', False)
        self.assertContainsRe(str(error), '{E:} not present in "root"')

    def test_get_parents(self):
        self.setup_abcde()
        self.assertEqual({'B':('A',)}, self.plan_merge_vf.get_parent_map(['B']))
        self.assertEqual({'D':('C',)}, self.plan_merge_vf.get_parent_map(['D']))
        self.assertEqual({'E:':('B', 'D')},
            self.plan_merge_vf.get_parent_map(['E:']))
        self.assertEqual({}, self.plan_merge_vf.get_parent_map(['F']))
        self.assertEqual({
                'B':('A',),
                'D':('C',),
                'E:':('B', 'D'),
                }, self.plan_merge_vf.get_parent_map(['B', 'D', 'E:', 'F']))

    def test_get_lines(self):
        self.setup_abcde()
        self.assertEqual(['a'], self.plan_merge_vf.get_lines('A'))
        self.assertEqual(['c'], self.plan_merge_vf.get_lines('C'))
        self.assertEqual(['e'], self.plan_merge_vf.get_lines('E:'))
        error = self.assertRaises(errors.RevisionNotPresent,
            self.plan_merge_vf.get_lines, 'F')
        self.assertContainsRe(str(error), '{F} not present in "root"')
1115
class InterString(versionedfile.InterVersionedFile):
    """An inter-versionedfile optimised code path for strings.

    This is for use during testing where we use strings as versionedfiles
    so that none of the default registered interversionedfile classes will
    match - which lets us test the match logic.
    """

    # is_compatible is called unbound by the optimiser registry, so it must
    # be a staticmethod (the decorator was lost in extraction).
    @staticmethod
    def is_compatible(source, target):
        """InterString is compatible with strings-as-versionedfiles."""
        return isinstance(source, str) and isinstance(target, str)
1129
# TODO this and the InterRepository core logic should be consolidatable
# if we make the registry a separate class though we still need to
# test the behaviour in the active registry to catch failure-to-handle-
# arbitrary-objects
1133
class TestInterVersionedFile(TestCaseWithMemoryTransport):
    """Tests for the InterVersionedFile optimiser registry."""

    def test_get_default_inter_versionedfile(self):
        # test that the InterVersionedFile.get(a, b) probes
        # for a class where is_compatible(a, b) returns
        # true and returns a default interversionedfile otherwise.
        # This also tests that the default registered optimised interversionedfile
        # classes do not barf inappropriately when a surprising versionedfile type
        # is handed to them.
        dummy_a = "VersionedFile 1."
        dummy_b = "VersionedFile 2."
        self.assertGetsDefaultInterVersionedFile(dummy_a, dummy_b)

    def assertGetsDefaultInterVersionedFile(self, a, b):
        """Asserts that InterVersionedFile.get(a, b) -> the default."""
        inter = versionedfile.InterVersionedFile.get(a, b)
        self.assertEqual(versionedfile.InterVersionedFile,
                         inter.__class__)
        self.assertEqual(a, inter.source)
        self.assertEqual(b, inter.target)

    def test_register_inter_versionedfile_class(self):
        # test that a optimised code path provider - a
        # InterVersionedFile subclass can be registered and unregistered
        # and that it is correctly selected when given a versionedfile
        # pair that it returns true on for the is_compatible static method
        dummy_a = "VersionedFile 1."
        dummy_b = "VersionedFile 2."
        versionedfile.InterVersionedFile.register_optimiser(InterString)
        # NOTE(review): the try/finally around the registered optimiser was
        # lost in extraction; restored so unregister always runs.
        try:
            # we should get the default for something InterString returns False
            # to
            self.assertFalse(InterString.is_compatible(dummy_a, None))
            self.assertGetsDefaultInterVersionedFile(dummy_a, None)
            # and we should get an InterString for a pair it 'likes'
            self.assertTrue(InterString.is_compatible(dummy_a, dummy_b))
            inter = versionedfile.InterVersionedFile.get(dummy_a, dummy_b)
            self.assertEqual(InterString, inter.__class__)
            self.assertEqual(dummy_a, inter.source)
            self.assertEqual(dummy_b, inter.target)
        finally:
            versionedfile.InterVersionedFile.unregister_optimiser(InterString)
        # now we should get the default InterVersionedFile object again.
        self.assertGetsDefaultInterVersionedFile(dummy_a, dummy_b)
1180
class TestReadonlyHttpMixin(object):
    """Mixin: read a versioned file back over readonly http.

    Hosts must provide get_file() and get_factory().
    """

    def get_transaction(self):
        # NOTE(review): body lost in extraction; restored as a trivial
        # transaction token — confirm upstream.
        return 1

    def test_readonly_http_works(self):
        # we should be able to read from http with a versioned file.
        vf = self.get_file()
        # try an empty file access
        readonly_vf = self.get_factory()('foo', get_transport(
            self.get_readonly_url('.')))
        self.assertEqual([], readonly_vf.versions())
        # now with feeling.
        vf.add_lines('1', [], ['a\n'])
        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
        readonly_vf = self.get_factory()('foo', get_transport(
            self.get_readonly_url('.')))
        self.assertEqual(['1', '2'], vf.versions())
        for version in readonly_vf.versions():
            readonly_vf.get_lines(version)
1200
class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):
    """Readonly-http tests backed by a WeaveFile."""

    def get_file(self):
        return WeaveFile('foo', get_transport(self.get_url('.')), create=True,
            get_scope=self.get_transaction)

    def get_factory(self):
        return WeaveFile
1210
class TestKnitHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):
    """Readonly-http tests backed by a file knit."""

    def get_file(self):
        return make_file_knit('foo', get_transport(self.get_url('.')),
            delta=True, create=True, get_scope=self.get_transaction)

    def get_factory(self):
        return make_file_knit
1220
class MergeCasesMixin(object):
# NOTE(review): this span was badly garbled in extraction — the interleaved
# bare numbers are fused original line numbers, and the multiline merge
# fixture strings (base/a/b/result) of several tests are missing entirely.
# The text below is preserved byte-for-byte pending recovery from history;
# do not attempt to run it as-is.
1222
def doMerge(self, base, a, b, mp):
1223
from cStringIO import StringIO
1224
from textwrap import dedent
1230
w.add_lines('text0', [], map(addcrlf, base))
1231
w.add_lines('text1', ['text0'], map(addcrlf, a))
1232
w.add_lines('text2', ['text0'], map(addcrlf, b))
1234
self.log_contents(w)
1236
self.log('merge plan:')
1237
p = list(w.plan_merge('text1', 'text2'))
1238
for state, line in p:
1240
self.log('%12s | %s' % (state, line[:-1]))
1244
mt.writelines(w.weave_merge(p))
1246
self.log(mt.getvalue())
1248
mp = map(addcrlf, mp)
1249
self.assertEqual(mt.readlines(), mp)
1252
def testOneInsert(self):
1258
def testSeparateInserts(self):
1259
self.doMerge(['aaa', 'bbb', 'ccc'],
1260
['aaa', 'xxx', 'bbb', 'ccc'],
1261
['aaa', 'bbb', 'yyy', 'ccc'],
1262
['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
1264
def testSameInsert(self):
1265
self.doMerge(['aaa', 'bbb', 'ccc'],
1266
['aaa', 'xxx', 'bbb', 'ccc'],
1267
['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
1268
['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
1269
overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']
1270
def testOverlappedInsert(self):
1271
self.doMerge(['aaa', 'bbb'],
1272
['aaa', 'xxx', 'yyy', 'bbb'],
1273
['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)
1275
# really it ought to reduce this to
1276
# ['aaa', 'xxx', 'yyy', 'bbb']
1279
def testClashReplace(self):
1280
self.doMerge(['aaa'],
1283
['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
1286
def testNonClashInsert1(self):
1287
self.doMerge(['aaa'],
1290
['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
1293
def testNonClashInsert2(self):
1294
self.doMerge(['aaa'],
1300
def testDeleteAndModify(self):
1301
"""Clashing delete and modification.
1303
If one side modifies a region and the other deletes it then
1304
there should be a conflict with one side blank.
1307
#######################################
1308
# skippd, not working yet
1311
self.doMerge(['aaa', 'bbb', 'ccc'],
1312
['aaa', 'ddd', 'ccc'],
1314
['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])
1316
def _test_merge_from_strings(self, base, a, b, expected):
1318
w.add_lines('text0', [], base.splitlines(True))
1319
w.add_lines('text1', ['text0'], a.splitlines(True))
1320
w.add_lines('text2', ['text0'], b.splitlines(True))
1321
self.log('merge plan:')
1322
p = list(w.plan_merge('text1', 'text2'))
1323
for state, line in p:
1325
self.log('%12s | %s' % (state, line[:-1]))
1326
self.log('merge result:')
1327
result_text = ''.join(w.weave_merge(p))
1328
self.log(result_text)
1329
self.assertEqualDiff(result_text, expected)
1331
def test_weave_merge_conflicts(self):
1332
# does weave merge properly handle plans that end with unchanged?
1333
result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
1334
self.assertEqual(result, 'hello\n')
1336
def test_deletion_extended(self):
1337
"""One side deletes, the other deletes more.
1354
self._test_merge_from_strings(base, a, b, result)
1356
def test_deletion_overlap(self):
1357
"""Delete overlapping regions with no other conflict.
1359
Arguably it'd be better to treat these as agreement, rather than
1360
conflict, but for now conflict is safer.
1388
self._test_merge_from_strings(base, a, b, result)
1390
def test_agreement_deletion(self):
1391
"""Agree to delete some lines, without conflicts."""
1413
self._test_merge_from_strings(base, a, b, result)
1415
def test_sync_on_deletion(self):
1416
"""Specific case of merge where we can synchronize incorrectly.
1418
A previous version of the weave merge concluded that the two versions
1419
agreed on deleting line 2, and this could be a synchronization point.
1420
Line 1 was then considered in isolation, and thought to be deleted on
1423
It's better to consider the whole thing as a disagreement region.
1434
a's replacement line 2
1447
a's replacement line 2
1454
self._test_merge_from_strings(base, a, b, result)
1457
class TestKnitMerge(TestCaseWithMemoryTransport, MergeCasesMixin):
    """Run the merge cases against knit-backed files."""

    def get_file(self, name='foo'):
        return make_file_knit(name, get_transport(self.get_url('.')),
            delta=True, create=True)

    def log_contents(self, w):
        # knits have no raw form worth dumping into the test log.
        pass
1467
class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):
    """Run the merge cases against weave-backed files."""

    def get_file(self, name='foo'):
        return WeaveFile(name, get_transport(self.get_url('.')), create=True)

    def log_contents(self, w):
        self.log('weave is:')
        tmpf = StringIO()
        write_weave(w, tmpf)
        self.log(tmpf.getvalue())

    # weaves produce a conflict here where knits reduce it; see
    # MergeCasesMixin.testOverlappedInsert.
    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
                                'xxx', '>>>>>>> ', 'bbb']
1482
class TestContentFactoryAdaption(TestCaseWithMemoryTransport):
    """Tests for the record-stream adapters between knit storage kinds."""

    def test_select_adaptor(self):
        """Test expected adapters exist."""
        # One scenario for each lookup combination we expect to use.
        # Each is source_kind, requested_kind, adapter class
        scenarios = [
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
            ('knit-annotated-delta-gz', 'knit-delta-gz',
                _mod_knit.DeltaAnnotatedToUnannotated),
            ('knit-annotated-delta-gz', 'fulltext',
                _mod_knit.DeltaAnnotatedToFullText),
            ('knit-annotated-ft-gz', 'knit-ft-gz',
                _mod_knit.FTAnnotatedToUnannotated),
            ('knit-annotated-ft-gz', 'fulltext',
                _mod_knit.FTAnnotatedToFullText),
            ]
        for source, requested, klass in scenarios:
            adapter_factory = versionedfile.adapter_registry.get(
                (source, requested))
            adapter = adapter_factory(None)
            self.assertIsInstance(adapter, klass)

    def get_knit(self, annotated=True):
        """Make a single-file knit, annotated or plain."""
        if annotated:
            factory = KnitAnnotateFactory()
        else:
            factory = KnitPlainFactory()
        return make_file_knit('knit', self.get_transport('.'), delta=True,
            create=True, factory=factory)

    def helpGetBytes(self, f, ft_adapter, delta_adapter):
        """Grab the interested adapted texts for tests."""
        # origin is a fulltext
        entries = f.get_record_stream(['origin'], 'unordered', False)
        base = entries.next()
        ft_data = ft_adapter.get_bytes(base,
            base.get_bytes_as(base.storage_kind))
        # merged is both a delta and multiple parents.
        entries = f.get_record_stream(['merged'], 'unordered', False)
        merged = entries.next()
        delta_data = delta_adapter.get_bytes(merged,
            merged.get_bytes_as(merged.storage_kind))
        return ft_data, delta_data

    def test_deannotation_noeol(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit(), trailing_eol=False)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        # NOTE(review): the record-body lines between the version header and
        # the terminator were lost in extraction; restored from the knit
        # record format ('<text>\nend <version>\n') — confirm upstream.
        self.assertEqual(
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
            '1,2,3\nleft\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_deannotation(self):
        """Test converting annotated knits to unannotated knits."""
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit())
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToUnannotated(None),
            _mod_knit.DeltaAnnotatedToUnannotated(None))
        self.assertEqual(
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
            'origin\n'
            'end origin\n',
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
        self.assertEqual(
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
            '2,2,2\nright\nmerged\nend merged\n',
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())

    def test_annotated_to_fulltext_no_eol(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit(), trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFileDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_lines', 'left')], logged_vf.calls)

    def test_annotated_to_fulltext(self):
        """Test adapting annotated knits to full texts (for -> weaves)."""
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit())
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFileDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTAnnotatedToFullText(None),
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_lines', 'left')], logged_vf.calls)

    def test_unannotated_to_fulltext(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit(annotated=False))
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFileDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin\n', ft_data)
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
        self.assertEqual([('get_lines', 'left')], logged_vf.calls)

    def test_unannotated_to_fulltext_no_eol(self):
        """Test adapting unannotated knits to full texts.

        This is used for -> weaves, and for -> annotated knits.
        """
        # we need a full text, and a delta
        f, parents = get_diamond_vf(self.get_knit(annotated=False),
            trailing_eol=False)
        # Reconstructing a full text requires a backing versioned file, and it
        # must have the base lines requested from it.
        logged_vf = versionedfile.RecordingVersionedFileDecorator(f)
        ft_data, delta_data = self.helpGetBytes(f,
            _mod_knit.FTPlainToFullText(None),
            _mod_knit.DeltaPlainToFullText(logged_vf))
        self.assertEqual('origin', ft_data)
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
        self.assertEqual([('get_lines', 'left')], logged_vf.calls)