/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

« back to all changes in this revision

Viewing changes to breezy/tests/per_versionedfile.py

  • Committer: Jelmer Vernooij
  • Date: 2017-07-23 22:06:41 UTC
  • mfrom: (6738 trunk)
  • mto: This revision was merged to the branch mainline in revision 6739.
  • Revision ID: jelmer@jelmer.uk-20170723220641-69eczax9bmv8d6kk
Merge trunk, address review comments.

Show diffs side-by-side

added added

removed removed

Lines of Context:
22
22
# considered typical and check that it can be detected/corrected.
23
23
 
24
24
from gzip import GzipFile
25
 
from io import BytesIO
26
25
import itertools
27
26
 
28
 
from ... import (
 
27
from .. import (
29
28
    errors,
30
29
    graph as _mod_graph,
31
30
    osutils,
33
32
    transport,
34
33
    ui,
35
34
    )
36
 
from .. import (
 
35
from ..bzr import (
37
36
    groupcompress,
38
37
    knit as _mod_knit,
39
38
    )
40
 
from ...errors import (
 
39
from ..errors import (
41
40
    RevisionNotPresent,
42
41
    RevisionAlreadyPresent,
43
42
    )
44
 
from ..knit import (
 
43
from ..bzr.knit import (
45
44
    cleanup_pack_knit,
46
45
    make_file_factory,
47
46
    make_pack_factory,
48
47
    )
49
 
from ...tests import (
 
48
from ..sixish import (
 
49
    BytesIO,
 
50
    zip,
 
51
    )
 
52
from . import (
50
53
    TestCase,
51
54
    TestCaseWithMemoryTransport,
52
55
    TestNotApplicable,
53
56
    TestSkipped,
54
57
    )
55
 
from ...tests.http_utils import TestCaseWithWebserver
56
 
from ...transport.memory import MemoryTransport
57
 
from .. import versionedfile as versionedfile
58
 
from ..versionedfile import (
59
 
    ChunkedContentFactory,
 
58
from .http_utils import TestCaseWithWebserver
 
59
from ..transport.memory import MemoryTransport
 
60
from ..bzr import versionedfile as versionedfile
 
61
from ..bzr.versionedfile import (
60
62
    ConstantMapper,
61
 
    ExistingContent,
62
63
    HashEscapedPrefixMapper,
63
64
    PrefixMapper,
64
 
    UnavailableRepresentation,
65
65
    VirtualVersionedFiles,
66
66
    make_versioned_files_factory,
67
67
    )
68
 
from ..weave import (
 
68
from ..bzr.weave import (
69
69
    WeaveFile,
70
70
    WeaveInvalidChecksum,
71
71
    )
72
 
from ..weavefile import write_weave
73
 
from ...tests.scenarios import load_tests_apply_scenarios
 
72
from ..bzr.weavefile import write_weave
 
73
from .scenarios import load_tests_apply_scenarios
74
74
 
75
75
 
76
76
load_tests = load_tests_apply_scenarios
82
82
    :param trailing_eol: If True end the last line with \n.
83
83
    """
84
84
    parents = {
85
 
        b'origin': (),
86
 
        b'base': ((b'origin',),),
87
 
        b'left': ((b'base',),),
88
 
        b'right': ((b'base',),),
89
 
        b'merged': ((b'left',), (b'right',)),
 
85
        'origin': (),
 
86
        'base': (('origin',),),
 
87
        'left': (('base',),),
 
88
        'right': (('base',),),
 
89
        'merged': (('left',), ('right',)),
90
90
        }
91
91
    # insert a diamond graph to exercise deltas and merges.
92
92
    if trailing_eol:
93
 
        last_char = b'\n'
 
93
        last_char = '\n'
94
94
    else:
95
 
        last_char = b''
96
 
    f.add_lines(b'origin', [], [b'origin' + last_char])
97
 
    f.add_lines(b'base', [b'origin'], [b'base' + last_char])
98
 
    f.add_lines(b'left', [b'base'], [b'base\n', b'left' + last_char])
 
95
        last_char = ''
 
96
    f.add_lines('origin', [], ['origin' + last_char])
 
97
    f.add_lines('base', ['origin'], ['base' + last_char])
 
98
    f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
99
99
    if not left_only:
100
 
        f.add_lines(b'right', [b'base'],
101
 
                    [b'base\n', b'right' + last_char])
102
 
        f.add_lines(b'merged', [b'left', b'right'],
103
 
                    [b'base\n', b'left\n', b'right\n', b'merged' + last_char])
 
100
        f.add_lines('right', ['base'],
 
101
            ['base\n', 'right' + last_char])
 
102
        f.add_lines('merged', ['left', 'right'],
 
103
            ['base\n', 'left\n', 'right\n', 'merged' + last_char])
104
104
    return f, parents
105
105
 
106
106
 
107
107
def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
108
 
                      nograph=False, nokeys=False):
 
108
    nograph=False, nokeys=False):
109
109
    """Get a diamond graph to exercise deltas and merges.
110
110
 
111
111
    This creates a 5-node graph in files. If files supports 2-length keys two
127
127
    if key_length == 1:
128
128
        prefixes = [()]
129
129
    else:
130
 
        prefixes = [(b'FileA',), (b'FileB',)]
 
130
        prefixes = [('FileA',), ('FileB',)]
131
131
    # insert a diamond graph to exercise deltas and merges.
132
132
    if trailing_eol:
133
 
        last_char = b'\n'
 
133
        last_char = '\n'
134
134
    else:
135
 
        last_char = b''
 
135
        last_char = ''
136
136
    result = []
137
 
 
138
137
    def get_parents(suffix_list):
139
138
        if nograph:
140
139
            return ()
141
140
        else:
142
141
            result = [prefix + suffix for suffix in suffix_list]
143
142
            return result
144
 
 
145
143
    def get_key(suffix):
146
144
        if nokeys:
147
145
            return (None, )
150
148
    # we loop over each key because that spreads the inserts across prefixes,
151
149
    # which is how commit operates.
152
150
    for prefix in prefixes:
153
 
        result.append(files.add_lines(prefix + get_key(b'origin'), (),
154
 
                                      [b'origin' + last_char]))
155
 
    for prefix in prefixes:
156
 
        result.append(files.add_lines(prefix + get_key(b'base'),
157
 
                                      get_parents([(b'origin',)]), [b'base' + last_char]))
158
 
    for prefix in prefixes:
159
 
        result.append(files.add_lines(prefix + get_key(b'left'),
160
 
                                      get_parents([(b'base',)]),
161
 
                                      [b'base\n', b'left' + last_char]))
 
151
        result.append(files.add_lines(prefix + get_key('origin'), (),
 
152
            ['origin' + last_char]))
 
153
    for prefix in prefixes:
 
154
        result.append(files.add_lines(prefix + get_key('base'),
 
155
            get_parents([('origin',)]), ['base' + last_char]))
 
156
    for prefix in prefixes:
 
157
        result.append(files.add_lines(prefix + get_key('left'),
 
158
            get_parents([('base',)]),
 
159
            ['base\n', 'left' + last_char]))
162
160
    if not left_only:
163
161
        for prefix in prefixes:
164
 
            result.append(files.add_lines(prefix + get_key(b'right'),
165
 
                                          get_parents([(b'base',)]),
166
 
                                          [b'base\n', b'right' + last_char]))
 
162
            result.append(files.add_lines(prefix + get_key('right'),
 
163
                get_parents([('base',)]),
 
164
                ['base\n', 'right' + last_char]))
167
165
        for prefix in prefixes:
168
 
            result.append(files.add_lines(prefix + get_key(b'merged'),
169
 
                                          get_parents(
170
 
                                              [(b'left',), (b'right',)]),
171
 
                                          [b'base\n', b'left\n', b'right\n', b'merged' + last_char]))
 
166
            result.append(files.add_lines(prefix + get_key('merged'),
 
167
                get_parents([('left',), ('right',)]),
 
168
                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
172
169
    return result
173
170
 
174
171
 
187
184
 
188
185
    def test_add(self):
189
186
        f = self.get_file()
190
 
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
191
 
        f.add_lines(b'r1', [b'r0'], [b'b\n', b'c\n'])
192
 
 
 
187
        f.add_lines('r0', [], ['a\n', 'b\n'])
 
188
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
193
189
        def verify_file(f):
194
190
            versions = f.versions()
195
 
            self.assertTrue(b'r0' in versions)
196
 
            self.assertTrue(b'r1' in versions)
197
 
            self.assertEqual(f.get_lines(b'r0'), [b'a\n', b'b\n'])
198
 
            self.assertEqual(f.get_text(b'r0'), b'a\nb\n')
199
 
            self.assertEqual(f.get_lines(b'r1'), [b'b\n', b'c\n'])
 
191
            self.assertTrue('r0' in versions)
 
192
            self.assertTrue('r1' in versions)
 
193
            self.assertEqual(f.get_lines('r0'), ['a\n', 'b\n'])
 
194
            self.assertEqual(f.get_text('r0'), 'a\nb\n')
 
195
            self.assertEqual(f.get_lines('r1'), ['b\n', 'c\n'])
200
196
            self.assertEqual(2, len(f))
201
197
            self.assertEqual(2, f.num_versions())
202
198
 
203
199
            self.assertRaises(RevisionNotPresent,
204
 
                              f.add_lines, b'r2', [b'foo'], [])
 
200
                f.add_lines, 'r2', ['foo'], [])
205
201
            self.assertRaises(RevisionAlreadyPresent,
206
 
                              f.add_lines, b'r1', [], [])
 
202
                f.add_lines, 'r1', [], [])
207
203
        verify_file(f)
208
204
        # this checks that reopen with create=True does not break anything.
209
205
        f = self.reopen_file(create=True)
212
208
    def test_adds_with_parent_texts(self):
213
209
        f = self.get_file()
214
210
        parent_texts = {}
215
 
        _, _, parent_texts[b'r0'] = f.add_lines(b'r0', [], [b'a\n', b'b\n'])
 
211
        _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
216
212
        try:
217
 
            _, _, parent_texts[b'r1'] = f.add_lines_with_ghosts(b'r1',
218
 
                                                                [b'r0', b'ghost'], [b'b\n', b'c\n'], parent_texts=parent_texts)
 
213
            _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
 
214
                ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
219
215
        except NotImplementedError:
220
216
            # if the format doesn't support ghosts, just add normally.
221
 
            _, _, parent_texts[b'r1'] = f.add_lines(b'r1',
222
 
                                                    [b'r0'], [b'b\n', b'c\n'], parent_texts=parent_texts)
223
 
        f.add_lines(b'r2', [b'r1'], [b'c\n', b'd\n'],
224
 
                    parent_texts=parent_texts)
225
 
        self.assertNotEqual(None, parent_texts[b'r0'])
226
 
        self.assertNotEqual(None, parent_texts[b'r1'])
227
 
 
 
217
            _, _, parent_texts['r1'] = f.add_lines('r1',
 
218
                ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
 
219
        f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
 
220
        self.assertNotEqual(None, parent_texts['r0'])
 
221
        self.assertNotEqual(None, parent_texts['r1'])
228
222
        def verify_file(f):
229
223
            versions = f.versions()
230
 
            self.assertTrue(b'r0' in versions)
231
 
            self.assertTrue(b'r1' in versions)
232
 
            self.assertTrue(b'r2' in versions)
233
 
            self.assertEqual(f.get_lines(b'r0'), [b'a\n', b'b\n'])
234
 
            self.assertEqual(f.get_lines(b'r1'), [b'b\n', b'c\n'])
235
 
            self.assertEqual(f.get_lines(b'r2'), [b'c\n', b'd\n'])
 
224
            self.assertTrue('r0' in versions)
 
225
            self.assertTrue('r1' in versions)
 
226
            self.assertTrue('r2' in versions)
 
227
            self.assertEqual(f.get_lines('r0'), ['a\n', 'b\n'])
 
228
            self.assertEqual(f.get_lines('r1'), ['b\n', 'c\n'])
 
229
            self.assertEqual(f.get_lines('r2'), ['c\n', 'd\n'])
236
230
            self.assertEqual(3, f.num_versions())
237
 
            origins = f.annotate(b'r1')
238
 
            self.assertEqual(origins[0][0], b'r0')
239
 
            self.assertEqual(origins[1][0], b'r1')
240
 
            origins = f.annotate(b'r2')
241
 
            self.assertEqual(origins[0][0], b'r1')
242
 
            self.assertEqual(origins[1][0], b'r2')
 
231
            origins = f.annotate('r1')
 
232
            self.assertEqual(origins[0][0], 'r0')
 
233
            self.assertEqual(origins[1][0], 'r1')
 
234
            origins = f.annotate('r2')
 
235
            self.assertEqual(origins[0][0], 'r1')
 
236
            self.assertEqual(origins[1][0], 'r2')
243
237
 
244
238
        verify_file(f)
245
239
        f = self.reopen_file()
250
244
        # versioned files version sequences of bytes only.
251
245
        vf = self.get_file()
252
246
        self.assertRaises(errors.BzrBadParameterUnicode,
253
 
                          vf.add_lines, b'a', [], [b'a\n', u'b\n', b'c\n'])
 
247
            vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
254
248
        self.assertRaises(
255
249
            (errors.BzrBadParameterUnicode, NotImplementedError),
256
 
            vf.add_lines_with_ghosts, b'a', [], [b'a\n', u'b\n', b'c\n'])
 
250
            vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])
257
251
 
258
252
    def test_add_follows_left_matching_blocks(self):
259
253
        """If we change left_matching_blocks, delta changes
264
258
        vf = self.get_file()
265
259
        if isinstance(vf, WeaveFile):
266
260
            raise TestSkipped("WeaveFile ignores left_matching_blocks")
267
 
        vf.add_lines(b'1', [], [b'a\n'])
268
 
        vf.add_lines(b'2', [b'1'], [b'a\n', b'a\n', b'a\n'],
 
261
        vf.add_lines('1', [], ['a\n'])
 
262
        vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
269
263
                     left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
270
 
        self.assertEqual([b'a\n', b'a\n', b'a\n'], vf.get_lines(b'2'))
271
 
        vf.add_lines(b'3', [b'1'], [b'a\n', b'a\n', b'a\n'],
 
264
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
 
265
        vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
272
266
                     left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
273
 
        self.assertEqual([b'a\n', b'a\n', b'a\n'], vf.get_lines(b'3'))
 
267
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))
274
268
 
275
269
    def test_inline_newline_throws(self):
276
270
        # \r characters are not permitted in lines being added
277
271
        vf = self.get_file()
278
272
        self.assertRaises(errors.BzrBadParameterContainsNewline,
279
 
                          vf.add_lines, b'a', [], [b'a\n\n'])
 
273
            vf.add_lines, 'a', [], ['a\n\n'])
280
274
        self.assertRaises(
281
275
            (errors.BzrBadParameterContainsNewline, NotImplementedError),
282
 
            vf.add_lines_with_ghosts, b'a', [], [b'a\n\n'])
 
276
            vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
283
277
        # but inline CR's are allowed
284
 
        vf.add_lines(b'a', [], [b'a\r\n'])
 
278
        vf.add_lines('a', [], ['a\r\n'])
285
279
        try:
286
 
            vf.add_lines_with_ghosts(b'b', [], [b'a\r\n'])
 
280
            vf.add_lines_with_ghosts('b', [], ['a\r\n'])
287
281
        except NotImplementedError:
288
282
            pass
289
283
 
290
284
    def test_add_reserved(self):
291
285
        vf = self.get_file()
292
286
        self.assertRaises(errors.ReservedId,
293
 
                          vf.add_lines, b'a:', [], [b'a\n', b'b\n', b'c\n'])
 
287
            vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])
294
288
 
295
289
    def test_add_lines_nostoresha(self):
296
290
        """When nostore_sha is supplied using old content raises."""
297
291
        vf = self.get_file()
298
 
        empty_text = (b'a', [])
299
 
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
300
 
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
 
292
        empty_text = ('a', [])
 
293
        sample_text_nl = ('b', ["foo\n", "bar\n"])
 
294
        sample_text_no_nl = ('c', ["foo\n", "bar"])
301
295
        shas = []
302
296
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
303
297
            sha, _, _ = vf.add_lines(version, [], lines)
304
298
            shas.append(sha)
305
299
        # we now have a copy of all the lines in the vf.
306
300
        for sha, (version, lines) in zip(
307
 
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
308
 
            self.assertRaises(ExistingContent,
309
 
                              vf.add_lines, version + b"2", [], lines,
310
 
                              nostore_sha=sha)
 
301
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
 
302
            self.assertRaises(errors.ExistingContent,
 
303
                vf.add_lines, version + "2", [], lines,
 
304
                nostore_sha=sha)
311
305
            # and no new version should have been added.
312
306
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
313
 
                              version + b"2")
 
307
                version + "2")
314
308
 
315
309
    def test_add_lines_with_ghosts_nostoresha(self):
316
310
        """When nostore_sha is supplied using old content raises."""
317
311
        vf = self.get_file()
318
 
        empty_text = (b'a', [])
319
 
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
320
 
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
 
312
        empty_text = ('a', [])
 
313
        sample_text_nl = ('b', ["foo\n", "bar\n"])
 
314
        sample_text_no_nl = ('c', ["foo\n", "bar"])
321
315
        shas = []
322
316
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
323
317
            sha, _, _ = vf.add_lines(version, [], lines)
325
319
        # we now have a copy of all the lines in the vf.
326
320
        # is the test applicable to this vf implementation?
327
321
        try:
328
 
            vf.add_lines_with_ghosts(b'd', [], [])
 
322
            vf.add_lines_with_ghosts('d', [], [])
329
323
        except NotImplementedError:
330
324
            raise TestSkipped("add_lines_with_ghosts is optional")
331
325
        for sha, (version, lines) in zip(
332
 
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
333
 
            self.assertRaises(ExistingContent,
334
 
                              vf.add_lines_with_ghosts, version + b"2", [], lines,
335
 
                              nostore_sha=sha)
 
326
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
 
327
            self.assertRaises(errors.ExistingContent,
 
328
                vf.add_lines_with_ghosts, version + "2", [], lines,
 
329
                nostore_sha=sha)
336
330
            # and no new version should have been added.
337
331
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
338
 
                              version + b"2")
 
332
                version + "2")
339
333
 
340
334
    def test_add_lines_return_value(self):
341
335
        # add_lines should return the sha1 and the text size.
342
336
        vf = self.get_file()
343
 
        empty_text = (b'a', [])
344
 
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
345
 
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
 
337
        empty_text = ('a', [])
 
338
        sample_text_nl = ('b', ["foo\n", "bar\n"])
 
339
        sample_text_no_nl = ('c', ["foo\n", "bar"])
346
340
        # check results for the three cases:
347
341
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
348
342
            # the first two elements are the same for all versioned files:
351
345
            result = vf.add_lines(version, [], lines)
352
346
            self.assertEqual(3, len(result))
353
347
            self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
354
 
                             result[0:2])
 
348
                result[0:2])
355
349
        # parents should not affect the result:
356
350
        lines = sample_text_nl[1]
357
351
        self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
358
 
                         vf.add_lines(b'd', [b'b', b'c'], lines)[0:2])
 
352
            vf.add_lines('d', ['b', 'c'], lines)[0:2])
359
353
 
360
354
    def test_get_reserved(self):
361
355
        vf = self.get_file()
362
 
        self.assertRaises(errors.ReservedId, vf.get_texts, [b'b:'])
363
 
        self.assertRaises(errors.ReservedId, vf.get_lines, b'b:')
364
 
        self.assertRaises(errors.ReservedId, vf.get_text, b'b:')
 
356
        self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
 
357
        self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
 
358
        self.assertRaises(errors.ReservedId, vf.get_text, 'b:')
365
359
 
366
360
    def test_add_unchanged_last_line_noeol_snapshot(self):
367
361
        """Add a text with an unchanged last line with no eol should work."""
376
370
        for length in range(20):
377
371
            version_lines = {}
378
372
            vf = self.get_file('case-%d' % length)
379
 
            prefix = b'step-%d'
 
373
            prefix = 'step-%d'
380
374
            parents = []
381
375
            for step in range(length):
382
376
                version = prefix % step
383
 
                lines = ([b'prelude \n'] * step) + [b'line']
 
377
                lines = (['prelude \n'] * step) + ['line']
384
378
                vf.add_lines(version, parents, lines)
385
379
                version_lines[version] = lines
386
380
                parents = [version]
387
 
            vf.add_lines(b'no-eol', parents, [b'line'])
 
381
            vf.add_lines('no-eol', parents, ['line'])
388
382
            vf.get_texts(version_lines.keys())
389
 
            self.assertEqualDiff(b'line', vf.get_text(b'no-eol'))
 
383
            self.assertEqualDiff('line', vf.get_text('no-eol'))
390
384
 
391
385
    def test_get_texts_eol_variation(self):
392
386
        # similar to the failure in <http://bugs.launchpad.net/234748>
393
387
        vf = self.get_file()
394
 
        sample_text_nl = [b"line\n"]
395
 
        sample_text_no_nl = [b"line"]
 
388
        sample_text_nl = ["line\n"]
 
389
        sample_text_no_nl = ["line"]
396
390
        versions = []
397
391
        version_lines = {}
398
392
        parents = []
399
393
        for i in range(4):
400
 
            version = b'v%d' % i
 
394
            version = 'v%d' % i
401
395
            if i % 2:
402
396
                lines = sample_text_nl
403
397
            else:
409
403
            # (which is what this test tests) will generate a correct line
410
404
            # delta (which is to say, an empty delta).
411
405
            vf.add_lines(version, parents, lines,
412
 
                         left_matching_blocks=[(0, 0, 1)])
 
406
                left_matching_blocks=[(0, 0, 1)])
413
407
            parents = [version]
414
408
            versions.append(version)
415
409
            version_lines[version] = lines
427
421
        # Test adding this in two situations:
428
422
        # On top of a new insertion
429
423
        vf = self.get_file('fulltext')
430
 
        vf.add_lines(b'noeol', [], [b'line'])
431
 
        vf.add_lines(b'noeol2', [b'noeol'], [b'newline\n', b'line'],
432
 
                     left_matching_blocks=[(0, 1, 1)])
433
 
        self.assertEqualDiff(b'newline\nline', vf.get_text(b'noeol2'))
 
424
        vf.add_lines('noeol', [], ['line'])
 
425
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
 
426
            left_matching_blocks=[(0, 1, 1)])
 
427
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
434
428
        # On top of a delta
435
429
        vf = self.get_file('delta')
436
 
        vf.add_lines(b'base', [], [b'line'])
437
 
        vf.add_lines(b'noeol', [b'base'], [b'prelude\n', b'line'])
438
 
        vf.add_lines(b'noeol2', [b'noeol'], [b'newline\n', b'line'],
439
 
                     left_matching_blocks=[(1, 1, 1)])
440
 
        self.assertEqualDiff(b'newline\nline', vf.get_text(b'noeol2'))
 
430
        vf.add_lines('base', [], ['line'])
 
431
        vf.add_lines('noeol', ['base'], ['prelude\n', 'line'])
 
432
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
 
433
            left_matching_blocks=[(1, 1, 1)])
 
434
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
441
435
 
442
436
    def test_make_mpdiffs(self):
443
437
        from breezy import multiparent
454
448
    def test_make_mpdiffs_with_ghosts(self):
455
449
        vf = self.get_file('foo')
456
450
        try:
457
 
            vf.add_lines_with_ghosts(b'text', [b'ghost'], [b'line\n'])
 
451
            vf.add_lines_with_ghosts('text', ['ghost'], ['line\n'])
458
452
        except NotImplementedError:
459
453
            # old Weave formats do not allow ghosts
460
454
            return
461
 
        self.assertRaises(errors.RevisionNotPresent,
462
 
                          vf.make_mpdiffs, [b'ghost'])
 
455
        self.assertRaises(errors.RevisionNotPresent, vf.make_mpdiffs, ['ghost'])
463
456
 
464
457
    def _setup_for_deltas(self, f):
465
458
        self.assertFalse(f.has_version('base'))
466
459
        # add texts that should trip the knit maximum delta chain threshold
467
460
        # as well as doing parallel chains of data in knits.
468
461
        # this is done by two chains of 25 insertions
469
 
        f.add_lines(b'base', [], [b'line\n'])
470
 
        f.add_lines(b'noeol', [b'base'], [b'line'])
 
462
        f.add_lines('base', [], ['line\n'])
 
463
        f.add_lines('noeol', ['base'], ['line'])
471
464
        # detailed eol tests:
472
465
        # shared last line with parent no-eol
473
 
        f.add_lines(b'noeolsecond', [b'noeol'], [b'line\n', b'line'])
 
466
        f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
474
467
        # differing last line with parent, both no-eol
475
 
        f.add_lines(b'noeolnotshared', [b'noeolsecond'], [b'line\n', b'phone'])
 
468
        f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
476
469
        # add eol following a noneol parent, change content
477
 
        f.add_lines(b'eol', [b'noeol'], [b'phone\n'])
 
470
        f.add_lines('eol', ['noeol'], ['phone\n'])
478
471
        # add eol following a noneol parent, no change content
479
 
        f.add_lines(b'eolline', [b'noeol'], [b'line\n'])
 
472
        f.add_lines('eolline', ['noeol'], ['line\n'])
480
473
        # noeol with no parents:
481
 
        f.add_lines(b'noeolbase', [], [b'line'])
 
474
        f.add_lines('noeolbase', [], ['line'])
482
475
        # noeol preceeding its leftmost parent in the output:
483
476
        # this is done by making it a merge of two parents with no common
484
477
        # anestry: noeolbase and noeol with the
485
478
        # later-inserted parent the leftmost.
486
 
        f.add_lines(b'eolbeforefirstparent', [
487
 
                    b'noeolbase', b'noeol'], [b'line'])
 
479
        f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
488
480
        # two identical eol texts
489
 
        f.add_lines(b'noeoldup', [b'noeol'], [b'line'])
490
 
        next_parent = b'base'
491
 
        text_name = b'chain1-'
492
 
        text = [b'line\n']
493
 
        sha1s = {0: b'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
494
 
                 1: b'45e21ea146a81ea44a821737acdb4f9791c8abe7',
495
 
                 2: b'e1f11570edf3e2a070052366c582837a4fe4e9fa',
496
 
                 3: b'26b4b8626da827088c514b8f9bbe4ebf181edda1',
497
 
                 4: b'e28a5510be25ba84d31121cff00956f9970ae6f6',
498
 
                 5: b'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
499
 
                 6: b'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
500
 
                 7: b'95c14da9cafbf828e3e74a6f016d87926ba234ab',
501
 
                 8: b'779e9a0b28f9f832528d4b21e17e168c67697272',
502
 
                 9: b'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
503
 
                 10: b'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
504
 
                 11: b'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
505
 
                 12: b'31a2286267f24d8bedaa43355f8ad7129509ea85',
506
 
                 13: b'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
507
 
                 14: b'2c4b1736566b8ca6051e668de68650686a3922f2',
508
 
                 15: b'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
509
 
                 16: b'b0d2e18d3559a00580f6b49804c23fea500feab3',
510
 
                 17: b'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
511
 
                 18: b'5cf64a3459ae28efa60239e44b20312d25b253f3',
512
 
                 19: b'1ebed371807ba5935958ad0884595126e8c4e823',
513
 
                 20: b'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
514
 
                 21: b'01edc447978004f6e4e962b417a4ae1955b6fe5d',
515
 
                 22: b'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
516
 
                 23: b'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
517
 
                 24: b'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
518
 
                 25: b'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
 
481
        f.add_lines('noeoldup', ['noeol'], ['line'])
 
482
        next_parent = 'base'
 
483
        text_name = 'chain1-'
 
484
        text = ['line\n']
 
485
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
 
486
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
 
487
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
 
488
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
 
489
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
 
490
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
 
491
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
 
492
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
 
493
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
 
494
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
 
495
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
 
496
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
 
497
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
 
498
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
 
499
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
 
500
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
 
501
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
 
502
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
 
503
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
 
504
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
 
505
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
 
506
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
 
507
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
 
508
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
 
509
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
 
510
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
519
511
                 }
520
512
        for depth in range(26):
521
 
            new_version = text_name + b'%d' % depth
522
 
            text = text + [b'line\n']
 
513
            new_version = text_name + '%s' % depth
 
514
            text = text + ['line\n']
523
515
            f.add_lines(new_version, [next_parent], text)
524
516
            next_parent = new_version
525
 
        next_parent = b'base'
526
 
        text_name = b'chain2-'
527
 
        text = [b'line\n']
 
517
        next_parent = 'base'
 
518
        text_name = 'chain2-'
 
519
        text = ['line\n']
528
520
        for depth in range(26):
529
 
            new_version = text_name + b'%d' % depth
530
 
            text = text + [b'line\n']
 
521
            new_version = text_name + '%s' % depth
 
522
            text = text + ['line\n']
531
523
            f.add_lines(new_version, [next_parent], text)
532
524
            next_parent = new_version
533
525
        return sha1s
534
526
 
535
527
    def test_ancestry(self):
536
528
        f = self.get_file()
537
 
        self.assertEqual(set(), f.get_ancestry([]))
538
 
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
539
 
        f.add_lines(b'r1', [b'r0'], [b'b\n', b'c\n'])
540
 
        f.add_lines(b'r2', [b'r0'], [b'b\n', b'c\n'])
541
 
        f.add_lines(b'r3', [b'r2'], [b'b\n', b'c\n'])
542
 
        f.add_lines(b'rM', [b'r1', b'r2'], [b'b\n', b'c\n'])
543
 
        self.assertEqual(set(), f.get_ancestry([]))
544
 
        versions = f.get_ancestry([b'rM'])
 
529
        self.assertEqual([], f.get_ancestry([]))
 
530
        f.add_lines('r0', [], ['a\n', 'b\n'])
 
531
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
 
532
        f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
 
533
        f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
 
534
        f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
 
535
        self.assertEqual([], f.get_ancestry([]))
 
536
        versions = f.get_ancestry(['rM'])
 
537
        # there are some possibilities:
 
538
        # r0 r1 r2 rM r3
 
539
        # r0 r1 r2 r3 rM
 
540
        # etc
 
541
        # so we check indexes
 
542
        r0 = versions.index('r0')
 
543
        r1 = versions.index('r1')
 
544
        r2 = versions.index('r2')
 
545
        self.assertFalse('r3' in versions)
 
546
        rM = versions.index('rM')
 
547
        self.assertTrue(r0 < r1)
 
548
        self.assertTrue(r0 < r2)
 
549
        self.assertTrue(r1 < rM)
 
550
        self.assertTrue(r2 < rM)
545
551
 
546
552
        self.assertRaises(RevisionNotPresent,
547
 
                          f.get_ancestry, [b'rM', b'rX'])
 
553
            f.get_ancestry, ['rM', 'rX'])
548
554
 
549
 
        self.assertEqual(set(f.get_ancestry(b'rM')),
550
 
                         set(f.get_ancestry(b'rM')))
 
555
        self.assertEqual(set(f.get_ancestry('rM')),
 
556
            set(f.get_ancestry('rM', topo_sorted=False)))
551
557
 
552
558
    def test_mutate_after_finish(self):
553
559
        self._transaction = 'before'
554
560
        f = self.get_file()
555
561
        self._transaction = 'after'
556
 
        self.assertRaises(errors.OutSideTransaction, f.add_lines, b'', [], [])
557
 
        self.assertRaises(errors.OutSideTransaction,
558
 
                          f.add_lines_with_ghosts, b'', [], [])
 
562
        self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
 
563
        self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])
559
564
 
560
565
    def test_copy_to(self):
561
566
        f = self.get_file()
562
 
        f.add_lines(b'0', [], [b'a\n'])
 
567
        f.add_lines('0', [], ['a\n'])
563
568
        t = MemoryTransport()
564
569
        f.copy_to('foo', t)
565
570
        for suffix in self.get_factory().get_suffixes():
572
577
 
573
578
    def test_get_parent_map(self):
574
579
        f = self.get_file()
575
 
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
576
 
        self.assertEqual(
577
 
            {b'r0': ()}, f.get_parent_map([b'r0']))
578
 
        f.add_lines(b'r1', [b'r0'], [b'a\n', b'b\n'])
579
 
        self.assertEqual(
580
 
            {b'r1': (b'r0',)}, f.get_parent_map([b'r1']))
581
 
        self.assertEqual(
582
 
            {b'r0': (),
583
 
             b'r1': (b'r0',)},
584
 
            f.get_parent_map([b'r0', b'r1']))
585
 
        f.add_lines(b'r2', [], [b'a\n', b'b\n'])
586
 
        f.add_lines(b'r3', [], [b'a\n', b'b\n'])
587
 
        f.add_lines(b'm', [b'r0', b'r1', b'r2', b'r3'], [b'a\n', b'b\n'])
588
 
        self.assertEqual(
589
 
            {b'm': (b'r0', b'r1', b'r2', b'r3')}, f.get_parent_map([b'm']))
590
 
        self.assertEqual({}, f.get_parent_map(b'y'))
591
 
        self.assertEqual(
592
 
            {b'r0': (),
593
 
             b'r1': (b'r0',)},
594
 
            f.get_parent_map([b'r0', b'y', b'r1']))
 
580
        f.add_lines('r0', [], ['a\n', 'b\n'])
 
581
        self.assertEqual(
 
582
            {'r0':()}, f.get_parent_map(['r0']))
 
583
        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
 
584
        self.assertEqual(
 
585
            {'r1':('r0',)}, f.get_parent_map(['r1']))
 
586
        self.assertEqual(
 
587
            {'r0':(),
 
588
             'r1':('r0',)},
 
589
            f.get_parent_map(['r0', 'r1']))
 
590
        f.add_lines('r2', [], ['a\n', 'b\n'])
 
591
        f.add_lines('r3', [], ['a\n', 'b\n'])
 
592
        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
 
593
        self.assertEqual(
 
594
            {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
 
595
        self.assertEqual({}, f.get_parent_map('y'))
 
596
        self.assertEqual(
 
597
            {'r0':(),
 
598
             'r1':('r0',)},
 
599
            f.get_parent_map(['r0', 'y', 'r1']))
595
600
 
596
601
    def test_annotate(self):
597
602
        f = self.get_file()
598
 
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
599
 
        f.add_lines(b'r1', [b'r0'], [b'c\n', b'b\n'])
600
 
        origins = f.annotate(b'r1')
601
 
        self.assertEqual(origins[0][0], b'r1')
602
 
        self.assertEqual(origins[1][0], b'r0')
 
603
        f.add_lines('r0', [], ['a\n', 'b\n'])
 
604
        f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
 
605
        origins = f.annotate('r1')
 
606
        self.assertEqual(origins[0][0], 'r1')
 
607
        self.assertEqual(origins[1][0], 'r0')
603
608
 
604
609
        self.assertRaises(RevisionNotPresent,
605
 
                          f.annotate, b'foo')
 
610
            f.annotate, 'foo')
606
611
 
607
612
    def test_detection(self):
608
613
        # Test weaves detect corruption.
613
618
 
614
619
        w = self.get_file_corrupted_text()
615
620
 
616
 
        self.assertEqual(b'hello\n', w.get_text(b'v1'))
617
 
        self.assertRaises(WeaveInvalidChecksum, w.get_text, b'v2')
618
 
        self.assertRaises(WeaveInvalidChecksum, w.get_lines, b'v2')
 
621
        self.assertEqual('hello\n', w.get_text('v1'))
 
622
        self.assertRaises(WeaveInvalidChecksum, w.get_text, 'v2')
 
623
        self.assertRaises(WeaveInvalidChecksum, w.get_lines, 'v2')
619
624
        self.assertRaises(WeaveInvalidChecksum, w.check)
620
625
 
621
626
        w = self.get_file_corrupted_checksum()
622
627
 
623
 
        self.assertEqual(b'hello\n', w.get_text(b'v1'))
624
 
        self.assertRaises(WeaveInvalidChecksum, w.get_text, b'v2')
625
 
        self.assertRaises(WeaveInvalidChecksum, w.get_lines, b'v2')
 
628
        self.assertEqual('hello\n', w.get_text('v1'))
 
629
        self.assertRaises(WeaveInvalidChecksum, w.get_text, 'v2')
 
630
        self.assertRaises(WeaveInvalidChecksum, w.get_lines, 'v2')
626
631
        self.assertRaises(WeaveInvalidChecksum, w.check)
627
632
 
628
633
    def get_file_corrupted_text(self):
650
655
 
651
656
        vf = self.get_file()
652
657
        # add a base to get included
653
 
        vf.add_lines(b'base', [], [b'base\n'])
 
658
        vf.add_lines('base', [], ['base\n'])
654
659
        # add a ancestor to be included on one side
655
 
        vf.add_lines(b'lancestor', [], [b'lancestor\n'])
 
660
        vf.add_lines('lancestor', [], ['lancestor\n'])
656
661
        # add a ancestor to be included on the other side
657
 
        vf.add_lines(b'rancestor', [b'base'], [b'rancestor\n'])
 
662
        vf.add_lines('rancestor', ['base'], ['rancestor\n'])
658
663
        # add a child of rancestor with no eofile-nl
659
 
        vf.add_lines(b'child', [b'rancestor'], [b'base\n', b'child\n'])
 
664
        vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
660
665
        # add a child of lancestor and base to join the two roots
661
 
        vf.add_lines(b'otherchild',
662
 
                     [b'lancestor', b'base'],
663
 
                     [b'base\n', b'lancestor\n', b'otherchild\n'])
664
 
 
 
666
        vf.add_lines('otherchild',
 
667
                     ['lancestor', 'base'],
 
668
                     ['base\n', 'lancestor\n', 'otherchild\n'])
665
669
        def iter_with_versions(versions, expected):
666
670
            # now we need to see what lines are returned, and how often.
667
671
            lines = {}
668
672
            progress = InstrumentedProgress()
669
673
            # iterate over the lines
670
674
            for line in vf.iter_lines_added_or_present_in_versions(versions,
671
 
                                                                   pb=progress):
 
675
                pb=progress):
672
676
                lines.setdefault(line, 0)
673
677
                lines[line] += 1
674
 
            if [] != progress.updates:
 
678
            if []!= progress.updates:
675
679
                self.assertEqual(expected, progress.updates)
676
680
            return lines
677
 
        lines = iter_with_versions([b'child', b'otherchild'],
 
681
        lines = iter_with_versions(['child', 'otherchild'],
678
682
                                   [('Walking content', 0, 2),
679
683
                                    ('Walking content', 1, 2),
680
684
                                    ('Walking content', 2, 2)])
681
685
        # we must see child and otherchild
682
 
        self.assertTrue(lines[(b'child\n', b'child')] > 0)
683
 
        self.assertTrue(lines[(b'otherchild\n', b'otherchild')] > 0)
 
686
        self.assertTrue(lines[('child\n', 'child')] > 0)
 
687
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
684
688
        # we dont care if we got more than that.
685
689
 
686
690
        # test all lines
691
695
                                          ('Walking content', 4, 5),
692
696
                                          ('Walking content', 5, 5)])
693
697
        # all lines must be seen at least once
694
 
        self.assertTrue(lines[(b'base\n', b'base')] > 0)
695
 
        self.assertTrue(lines[(b'lancestor\n', b'lancestor')] > 0)
696
 
        self.assertTrue(lines[(b'rancestor\n', b'rancestor')] > 0)
697
 
        self.assertTrue(lines[(b'child\n', b'child')] > 0)
698
 
        self.assertTrue(lines[(b'otherchild\n', b'otherchild')] > 0)
 
698
        self.assertTrue(lines[('base\n', 'base')] > 0)
 
699
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
 
700
        self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
 
701
        self.assertTrue(lines[('child\n', 'child')] > 0)
 
702
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
699
703
 
700
704
    def test_add_lines_with_ghosts(self):
701
705
        # some versioned file formats allow lines to be added with parent
708
712
        parent_id_unicode = u'b\xbfse'
709
713
        parent_id_utf8 = parent_id_unicode.encode('utf8')
710
714
        try:
711
 
            vf.add_lines_with_ghosts(b'notbxbfse', [parent_id_utf8], [])
 
715
            vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
712
716
        except NotImplementedError:
713
717
            # check the other ghost apis are also not implemented
714
 
            self.assertRaises(NotImplementedError,
715
 
                              vf.get_ancestry_with_ghosts, [b'foo'])
716
 
            self.assertRaises(NotImplementedError,
717
 
                              vf.get_parents_with_ghosts, b'foo')
 
718
            self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
 
719
            self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
718
720
            return
719
721
        vf = self.reopen_file()
720
722
        # test key graph related apis: getncestry, _graph, get_parents
721
723
        # has_version
722
724
        # - these are ghost unaware and must not be reflect ghosts
723
 
        self.assertEqual(set([b'notbxbfse']), vf.get_ancestry(b'notbxbfse'))
 
725
        self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
724
726
        self.assertFalse(vf.has_version(parent_id_utf8))
725
727
        # we have _with_ghost apis to give us ghost information.
726
 
        self.assertEqual(set([parent_id_utf8, b'notbxbfse']),
727
 
                         vf.get_ancestry_with_ghosts([b'notbxbfse']))
728
 
        self.assertEqual([parent_id_utf8],
729
 
                         vf.get_parents_with_ghosts(b'notbxbfse'))
 
728
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
 
729
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
730
730
        # if we add something that is a ghost of another, it should correct the
731
731
        # results of the prior apis
732
732
        vf.add_lines(parent_id_utf8, [], [])
733
 
        self.assertEqual(set([parent_id_utf8, b'notbxbfse']),
734
 
                         vf.get_ancestry([b'notbxbfse']))
735
 
        self.assertEqual({b'notbxbfse': (parent_id_utf8,)},
736
 
                         vf.get_parent_map([b'notbxbfse']))
 
733
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
 
734
        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
 
735
            vf.get_parent_map(['notbxbfse']))
737
736
        self.assertTrue(vf.has_version(parent_id_utf8))
738
737
        # we have _with_ghost apis to give us ghost information.
739
 
        self.assertEqual(set([parent_id_utf8, b'notbxbfse']),
740
 
                         vf.get_ancestry_with_ghosts([b'notbxbfse']))
741
 
        self.assertEqual([parent_id_utf8],
742
 
                         vf.get_parents_with_ghosts(b'notbxbfse'))
 
738
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
 
739
            vf.get_ancestry_with_ghosts(['notbxbfse']))
 
740
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
743
741
 
744
742
    def test_add_lines_with_ghosts_after_normal_revs(self):
745
743
        # some versioned file formats allow lines to be added with parent
749
747
        vf = self.get_file()
750
748
        # probe for ghost support
751
749
        try:
752
 
            vf.add_lines_with_ghosts(b'base', [], [b'line\n', b'line_b\n'])
 
750
            vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
753
751
        except NotImplementedError:
754
752
            return
755
 
        vf.add_lines_with_ghosts(b'references_ghost',
756
 
                                 [b'base', b'a_ghost'],
757
 
                                 [b'line\n', b'line_b\n', b'line_c\n'])
758
 
        origins = vf.annotate(b'references_ghost')
759
 
        self.assertEqual((b'base', b'line\n'), origins[0])
760
 
        self.assertEqual((b'base', b'line_b\n'), origins[1])
761
 
        self.assertEqual((b'references_ghost', b'line_c\n'), origins[2])
 
753
        vf.add_lines_with_ghosts('references_ghost',
 
754
                                 ['base', 'a_ghost'],
 
755
                                 ['line\n', 'line_b\n', 'line_c\n'])
 
756
        origins = vf.annotate('references_ghost')
 
757
        self.assertEqual(('base', 'line\n'), origins[0])
 
758
        self.assertEqual(('base', 'line_b\n'), origins[1])
 
759
        self.assertEqual(('references_ghost', 'line_c\n'), origins[2])
762
760
 
763
761
    def test_readonly_mode(self):
764
762
        t = self.get_transport()
765
763
        factory = self.get_factory()
766
764
        vf = factory('id', t, 0o777, create=True, access_mode='w')
767
765
        vf = factory('id', t, access_mode='r')
768
 
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, b'base', [], [])
 
766
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
769
767
        self.assertRaises(errors.ReadOnlyError,
770
768
                          vf.add_lines_with_ghosts,
771
 
                          b'base',
 
769
                          'base',
772
770
                          [],
773
771
                          [])
774
772
 
776
774
        # check the sha1 data is available
777
775
        vf = self.get_file()
778
776
        # a simple file
779
 
        vf.add_lines(b'a', [], [b'a\n'])
 
777
        vf.add_lines('a', [], ['a\n'])
780
778
        # the same file, different metadata
781
 
        vf.add_lines(b'b', [b'a'], [b'a\n'])
 
779
        vf.add_lines('b', ['a'], ['a\n'])
782
780
        # a file differing only in last newline.
783
 
        vf.add_lines(b'c', [], [b'a'])
 
781
        vf.add_lines('c', [], ['a'])
784
782
        self.assertEqual({
785
 
            b'a': b'3f786850e387550fdab836ed7e6dc881de23001b',
786
 
            b'c': b'86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
787
 
            b'b': b'3f786850e387550fdab836ed7e6dc881de23001b',
 
783
            'a': '3f786850e387550fdab836ed7e6dc881de23001b',
 
784
            'c': '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
 
785
            'b': '3f786850e387550fdab836ed7e6dc881de23001b',
788
786
            },
789
 
            vf.get_sha1s([b'a', b'c', b'b']))
 
787
            vf.get_sha1s(['a', 'c', 'b']))
790
788
 
791
789
 
792
790
class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):
800
798
        w = WeaveFile('foo', self.get_transport(),
801
799
                      create=True,
802
800
                      get_scope=self.get_transaction)
803
 
        w.add_lines(b'v1', [], [b'hello\n'])
804
 
        w.add_lines(b'v2', [b'v1'], [b'hello\n', b'there\n'])
 
801
        w.add_lines('v1', [], ['hello\n'])
 
802
        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
805
803
 
806
804
        # We are going to invasively corrupt the text
807
805
        # Make sure the internals of weave are the same
808
 
        self.assertEqual([(b'{', 0), b'hello\n', (b'}', None), (b'{', 1), b'there\n', (b'}', None)
809
 
                          ], w._weave)
 
806
        self.assertEqual([('{', 0)
 
807
                        , 'hello\n'
 
808
                        , ('}', None)
 
809
                        , ('{', 1)
 
810
                        , 'there\n'
 
811
                        , ('}', None)
 
812
                        ], w._weave)
810
813
 
811
 
        self.assertEqual([b'f572d396fae9206628714fb2ce00f72e94f2258f', b'90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
812
 
                          ], w._sha1s)
 
814
        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
 
815
                        , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
 
816
                        ], w._sha1s)
813
817
        w.check()
814
818
 
815
819
        # Corrupted
816
 
        w._weave[4] = b'There\n'
 
820
        w._weave[4] = 'There\n'
817
821
        return w
818
822
 
819
823
    def get_file_corrupted_checksum(self):
820
824
        w = self.get_file_corrupted_text()
821
825
        # Corrected
822
 
        w._weave[4] = b'there\n'
823
 
        self.assertEqual(b'hello\nthere\n', w.get_text(b'v2'))
 
826
        w._weave[4] = 'there\n'
 
827
        self.assertEqual('hello\nthere\n', w.get_text('v2'))
824
828
 
825
 
        # Invalid checksum, first digit changed
826
 
        w._sha1s[1] = b'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
 
829
        #Invalid checksum, first digit changed
 
830
        w._sha1s[1] =  'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
827
831
        return w
828
832
 
829
833
    def reopen_file(self, name='foo', create=False):
854
858
        self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])
855
859
 
856
860
    def test_add_lines(self):
857
 
        self.plan_merge_vf.add_lines((b'root', b'a:'), [], [])
858
 
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
859
 
                          (b'root', b'a'), [], [])
860
 
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
861
 
                          (b'root', b'a:'), None, [])
862
 
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
863
 
                          (b'root', b'a:'), [], None)
 
861
        self.plan_merge_vf.add_lines(('root', 'a:'), [], [])
 
862
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
 
863
            ('root', 'a'), [], [])
 
864
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
 
865
            ('root', 'a:'), None, [])
 
866
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
 
867
            ('root', 'a:'), [], None)
864
868
 
865
869
    def setup_abcde(self):
866
 
        self.vf1.add_lines((b'root', b'A'), [], [b'a'])
867
 
        self.vf1.add_lines((b'root', b'B'), [(b'root', b'A')], [b'b'])
868
 
        self.vf2.add_lines((b'root', b'C'), [], [b'c'])
869
 
        self.vf2.add_lines((b'root', b'D'), [(b'root', b'C')], [b'd'])
870
 
        self.plan_merge_vf.add_lines((b'root', b'E:'),
871
 
                                     [(b'root', b'B'), (b'root', b'D')], [b'e'])
 
870
        self.vf1.add_lines(('root', 'A'), [], ['a'])
 
871
        self.vf1.add_lines(('root', 'B'), [('root', 'A')], ['b'])
 
872
        self.vf2.add_lines(('root', 'C'), [], ['c'])
 
873
        self.vf2.add_lines(('root', 'D'), [('root', 'C')], ['d'])
 
874
        self.plan_merge_vf.add_lines(('root', 'E:'),
 
875
            [('root', 'B'), ('root', 'D')], ['e'])
872
876
 
873
877
    def test_get_parents(self):
874
878
        self.setup_abcde()
875
 
        self.assertEqual({(b'root', b'B'): ((b'root', b'A'),)},
876
 
                         self.plan_merge_vf.get_parent_map([(b'root', b'B')]))
877
 
        self.assertEqual({(b'root', b'D'): ((b'root', b'C'),)},
878
 
                         self.plan_merge_vf.get_parent_map([(b'root', b'D')]))
879
 
        self.assertEqual({(b'root', b'E:'): ((b'root', b'B'), (b'root', b'D'))},
880
 
                         self.plan_merge_vf.get_parent_map([(b'root', b'E:')]))
 
879
        self.assertEqual({('root', 'B'):(('root', 'A'),)},
 
880
            self.plan_merge_vf.get_parent_map([('root', 'B')]))
 
881
        self.assertEqual({('root', 'D'):(('root', 'C'),)},
 
882
            self.plan_merge_vf.get_parent_map([('root', 'D')]))
 
883
        self.assertEqual({('root', 'E:'):(('root', 'B'),('root', 'D'))},
 
884
            self.plan_merge_vf.get_parent_map([('root', 'E:')]))
881
885
        self.assertEqual({},
882
 
                         self.plan_merge_vf.get_parent_map([(b'root', b'F')]))
 
886
            self.plan_merge_vf.get_parent_map([('root', 'F')]))
883
887
        self.assertEqual({
884
 
            (b'root', b'B'): ((b'root', b'A'),),
885
 
            (b'root', b'D'): ((b'root', b'C'),),
886
 
            (b'root', b'E:'): ((b'root', b'B'), (b'root', b'D')),
887
 
            },
 
888
                ('root', 'B'):(('root', 'A'),),
 
889
                ('root', 'D'):(('root', 'C'),),
 
890
                ('root', 'E:'):(('root', 'B'),('root', 'D')),
 
891
                },
888
892
            self.plan_merge_vf.get_parent_map(
889
 
                [(b'root', b'B'), (b'root', b'D'), (b'root', b'E:'), (b'root', b'F')]))
 
893
                [('root', 'B'), ('root', 'D'), ('root', 'E:'), ('root', 'F')]))
890
894
 
891
895
    def test_get_record_stream(self):
892
896
        self.setup_abcde()
893
 
 
894
897
        def get_record(suffix):
895
898
            return next(self.plan_merge_vf.get_record_stream(
896
 
                [(b'root', suffix)], 'unordered', True))
897
 
        self.assertEqual(b'a', get_record(b'A').get_bytes_as('fulltext'))
898
 
        self.assertEqual(b'a', b''.join(get_record(b'A').iter_bytes_as('chunked')))
899
 
        self.assertEqual(b'c', get_record(b'C').get_bytes_as('fulltext'))
900
 
        self.assertEqual(b'e', get_record(b'E:').get_bytes_as('fulltext'))
 
899
                [('root', suffix)], 'unordered', True))
 
900
        self.assertEqual('a', get_record('A').get_bytes_as('fulltext'))
 
901
        self.assertEqual('c', get_record('C').get_bytes_as('fulltext'))
 
902
        self.assertEqual('e', get_record('E:').get_bytes_as('fulltext'))
901
903
        self.assertEqual('absent', get_record('F').storage_kind)
902
904
 
903
905
 
911
@@ -913 +911 @@
         vf = self.get_file()
         # try an empty file access
         readonly_vf = self.get_factory()('foo',
-            transport.get_transport_from_url(self.get_readonly_url('.')))
+                                         transport.get_transport_from_url(self.get_readonly_url('.')))
         self.assertEqual([], readonly_vf.versions())
 
     def test_readonly_http_works_with_feeling(self):
         # we should be able to read from http with a versioned file.
         vf = self.get_file()
         # now with feeling.
-        vf.add_lines('1', [], ['a\n'])
-        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
+        vf.add_lines(b'1', [], [b'a\n'])
+        vf.add_lines(b'2', [b'1'], [b'b\n', b'a\n'])
         readonly_vf = self.get_factory()('foo',
-            transport.get_transport_from_url(self.get_readonly_url('.')))
-        self.assertEqual(['1', '2'], vf.versions())
-        self.assertEqual(['1', '2'], readonly_vf.versions())
+                                         transport.get_transport_from_url(self.get_readonly_url('.')))
+        self.assertEqual([b'1', b'2'], vf.versions())
+        self.assertEqual([b'1', b'2'], readonly_vf.versions())
         for version in readonly_vf.versions():
             readonly_vf.get_lines(version)
 
@@ -947 +945 @@
         from textwrap import dedent
 
         def addcrlf(x):
-            return x + '\n'
+            return x + b'\n'
 
         w = self.get_file()
-        w.add_lines('text0', [], list(map(addcrlf, base)))
-        w.add_lines('text1', ['text0'], list(map(addcrlf, a)))
-        w.add_lines('text2', ['text0'], list(map(addcrlf, b)))
+        w.add_lines(b'text0', [], list(map(addcrlf, base)))
+        w.add_lines(b'text1', [b'text0'], list(map(addcrlf, a)))
+        w.add_lines(b'text2', [b'text0'], list(map(addcrlf, b)))
 
         self.log_contents(w)
 
         self.log('merge plan:')
-        p = list(w.plan_merge('text1', 'text2'))
+        p = list(w.plan_merge(b'text1', b'text2'))
         for state, line in p:
             if line:
                 self.log('%12s | %s' % (state, line[:-1]))
@@ -971 +969 @@
         mp = list(map(addcrlf, mp))
         self.assertEqual(mt.readlines(), mp)
 
-
     def testOneInsert(self):
         self.doMerge([],
-                     ['aa'],
+                     [b'aa'],
                      [],
-                     ['aa'])
+                     [b'aa'])
 
     def testSeparateInserts(self):
-        self.doMerge(['aaa', 'bbb', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'ccc'],
-                     ['aaa', 'bbb', 'yyy', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
+        self.doMerge([b'aaa', b'bbb', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'ccc'],
+                     [b'aaa', b'bbb', b'yyy', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'])
 
     def testSameInsert(self):
-        self.doMerge(['aaa', 'bbb', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
-    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']
+        self.doMerge([b'aaa', b'bbb', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'])
+    overlappedInsertExpected = [b'aaa', b'xxx', b'yyy', b'bbb']
+
     def testOverlappedInsert(self):
-        self.doMerge(['aaa', 'bbb'],
-                     ['aaa', 'xxx', 'yyy', 'bbb'],
-                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)
+        self.doMerge([b'aaa', b'bbb'],
+                     [b'aaa', b'xxx', b'yyy', b'bbb'],
+                     [b'aaa', b'xxx', b'bbb'], self.overlappedInsertExpected)
 
         # really it ought to reduce this to
-        # ['aaa', 'xxx', 'yyy', 'bbb']
-
+        # [b'aaa', b'xxx', b'yyy', b'bbb']
 
     def testClashReplace(self):
-        self.doMerge(['aaa'],
-                     ['xxx'],
-                     ['yyy', 'zzz'],
-                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
-                      '>>>>>>> '])
+        self.doMerge([b'aaa'],
+                     [b'xxx'],
+                     [b'yyy', b'zzz'],
+                     [b'<<<<<<< ', b'xxx', b'=======', b'yyy', b'zzz',
+                      b'>>>>>>> '])
 
     def testNonClashInsert1(self):
-        self.doMerge(['aaa'],
-                     ['xxx', 'aaa'],
-                     ['yyy', 'zzz'],
-                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
-                      '>>>>>>> '])
+        self.doMerge([b'aaa'],
+                     [b'xxx', b'aaa'],
+                     [b'yyy', b'zzz'],
+                     [b'<<<<<<< ', b'xxx', b'aaa', b'=======', b'yyy', b'zzz',
+                      b'>>>>>>> '])
 
     def testNonClashInsert2(self):
-        self.doMerge(['aaa'],
-                     ['aaa'],
-                     ['yyy', 'zzz'],
-                     ['yyy', 'zzz'])
-
+        self.doMerge([b'aaa'],
+                     [b'aaa'],
+                     [b'yyy', b'zzz'],
+                     [b'yyy', b'zzz'])
 
     def testDeleteAndModify(self):
         """Clashing delete and modification.
@@ -1031 +1027 @@
         # skippd, not working yet
         return
 
-        self.doMerge(['aaa', 'bbb', 'ccc'],
-                     ['aaa', 'ddd', 'ccc'],
-                     ['aaa', 'ccc'],
-                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])
+        self.doMerge([b'aaa', b'bbb', b'ccc'],
+                     [b'aaa', b'ddd', b'ccc'],
+                     [b'aaa', b'ccc'],
+                     [b'<<<<<<<< ', b'aaa', b'=======', b'>>>>>>> ', b'ccc'])
 
     def _test_merge_from_strings(self, base, a, b, expected):
         w = self.get_file()
-        w.add_lines('text0', [], base.splitlines(True))
-        w.add_lines('text1', ['text0'], a.splitlines(True))
-        w.add_lines('text2', ['text0'], b.splitlines(True))
+        w.add_lines(b'text0', [], base.splitlines(True))
+        w.add_lines(b'text1', [b'text0'], a.splitlines(True))
+        w.add_lines(b'text2', [b'text0'], b.splitlines(True))
         self.log('merge plan:')
-        p = list(w.plan_merge('text1', 'text2'))
+        p = list(w.plan_merge(b'text1', b'text2'))
         for state, line in p:
             if line:
                 self.log('%12s | %s' % (state, line[:-1]))
         self.log('merge result:')
-        result_text = ''.join(w.weave_merge(p))
+        result_text = b''.join(w.weave_merge(p))
         self.log(result_text)
         self.assertEqualDiff(result_text, expected)
 
     def test_weave_merge_conflicts(self):
         # does weave merge properly handle plans that end with unchanged?
-        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
-        self.assertEqual(result, 'hello\n')
+        result = b''.join(self.get_file().weave_merge([('new-a', b'hello\n')]))
+        self.assertEqual(result, b'hello\n')
 
     def test_deletion_extended(self):
         """One side deletes, the other deletes more.
         """
-        base = """\
+        base = b"""\
             line 1
             line 2
             line 3
             """
-        a = """\
+        a = b"""\
             line 1
             line 2
             """
-        b = """\
+        b = b"""\
             line 1
             """
-        result = """\
+        result = b"""\
             line 1
 <<<<<<<\x20
             line 2
@@ -1086 +1082 @@
         Arguably it'd be better to treat these as agreement, rather than
         conflict, but for now conflict is safer.
         """
-        base = """\
+        base = b"""\
             start context
             int a() {}
             int b() {}
             int c() {}
             end context
             """
-        a = """\
+        a = b"""\
             start context
             int a() {}
             end context
             """
-        b = """\
+        b = b"""\
             start context
             int c() {}
             end context
             """
-        result = """\
+        result = b"""\
             start context
 <<<<<<<\x20
             int a() {}
@@ -1116 +1112 @@
 
     def test_agreement_deletion(self):
         """Agree to delete some lines, without conflicts."""
-        base = """\
+        base = b"""\
             start context
             base line 1
             base line 2
             end context
             """
-        a = """\
-            start context
-            base line 1
-            end context
-            """
-        b = """\
-            start context
-            base line 1
-            end context
-            """
-        result = """\
+        a = b"""\
+            start context
+            base line 1
+            end context
+            """
+        b = b"""\
+            start context
+            base line 1
+            end context
+            """
+        result = b"""\
             start context
             base line 1
             end context
@@ -1149 +1145 @@
 
         It's better to consider the whole thing as a disagreement region.
         """
-        base = """\
+        base = b"""\
            start context
             base line 1
             base line 2
             end context
             """
-        a = """\
+        a = b"""\
             start context
             base line 1
             a's replacement line 2
             end context
             """
-        b = """\
+        b = b"""\
             start context
             b replaces
             both lines
             end context
             """
-        result = """\
+        result = b"""\
             start context
 <<<<<<<\x20
             base line 1
@@ -1193 +1189 @@
         write_weave(w, tmpf)
         self.log(tmpf.getvalue())
 
-    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
-                                'xxx', '>>>>>>> ', 'bbb']
+    overlappedInsertExpected = [b'aaa', b'<<<<<<< ', b'xxx', b'yyy', b'=======',
+                                b'xxx', b'>>>>>>> ', b'bbb']
 
 
 class TestContentFactoryAdaption(TestCaseWithMemoryTransport):
@@ -1205 +1201 @@
         # Each is source_kind, requested_kind, adapter class
         scenarios = [
             ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
+            ('knit-delta-gz', 'lines', _mod_knit.DeltaPlainToFullText),
+            ('knit-delta-gz', 'chunked', _mod_knit.DeltaPlainToFullText),
             ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
+            ('knit-ft-gz', 'lines', _mod_knit.FTPlainToFullText),
+            ('knit-ft-gz', 'chunked', _mod_knit.FTPlainToFullText),
             ('knit-annotated-delta-gz', 'knit-delta-gz',
                 _mod_knit.DeltaAnnotatedToUnannotated),
             ('knit-annotated-delta-gz', 'fulltext',
@@ -1214 +1214 @@
                 _mod_knit.FTAnnotatedToUnannotated),
             ('knit-annotated-ft-gz', 'fulltext',
                 _mod_knit.FTAnnotatedToFullText),
+            ('knit-annotated-ft-gz', 'lines',
+                _mod_knit.FTAnnotatedToFullText),
+            ('knit-annotated-ft-gz', 'chunked',
+                _mod_knit.FTAnnotatedToFullText),
             ]
         for source, requested, klass in scenarios:
             adapter_factory = versionedfile.adapter_registry.get(
@@ -1226 +1230 @@
         transport = self.get_transport()
         return make_file_factory(annotated, mapper)(transport)
 
-    def helpGetBytes(self, f, ft_adapter, delta_adapter):
+    def helpGetBytes(self, f, ft_name, ft_adapter, delta_name, delta_adapter):
         """Grab the interested adapted texts for tests."""
         # origin is a fulltext
-        entries = f.get_record_stream([('origin',)], 'unordered', False)
+        entries = f.get_record_stream([(b'origin',)], 'unordered', False)
         base = next(entries)
-        ft_data = ft_adapter.get_bytes(base)
+        ft_data = ft_adapter.get_bytes(base, ft_name)
         # merged is both a delta and multiple parents.
-        entries = f.get_record_stream([('merged',)], 'unordered', False)
+        entries = f.get_record_stream([(b'merged',)], 'unordered', False)
         merged = next(entries)
-        delta_data = delta_adapter.get_bytes(merged)
+        delta_data = delta_adapter.get_bytes(merged, delta_name)
         return ft_data, delta_data
 
     def test_deannotation_noeol(self):
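The helpGetBytes change above threads the requested representation name through to each adapter, because the adapter get_bytes call now takes the record plus a target kind, as in ft_adapter.get_bytes(base, ft_name). A hedged stand-in showing just that calling convention (UpperAdapter is invented for illustration and is not a breezy class):

    # Illustrative only: the adapter receives the record and the name of
    # the representation it should produce.
    class UpperAdapter:
        def get_bytes(self, record, target_kind):
            data = record.upper()
            # 'chunked' consumers expect a list of byte chunks
            return [data] if target_kind == 'chunked' else data

    adapter = UpperAdapter()
    assert adapter.get_bytes(b'origin', 'fulltext') == b'ORIGIN'
    assert adapter.get_bytes(b'origin', 'chunked') == [b'ORIGIN']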
@@ -1243 +1247 @@
         # we need a full text, and a delta
         f = self.get_knit()
         get_diamond_files(f, 1, trailing_eol=False)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTAnnotatedToUnannotated(None),
-            _mod_knit.DeltaAnnotatedToUnannotated(None))
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'knit-ft-gz', _mod_knit.FTAnnotatedToUnannotated(None),
+            'knit-delta-gz', _mod_knit.DeltaAnnotatedToUnannotated(None))
         self.assertEqual(
-            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
-            'origin\n'
-            'end origin\n',
+            b'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
+            b'origin\n'
+            b'end origin\n',
             GzipFile(mode='rb', fileobj=BytesIO(ft_data)).read())
         self.assertEqual(
-            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
-            '1,2,3\nleft\nright\nmerged\nend merged\n',
+            b'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
+            b'1,2,3\nleft\nright\nmerged\nend merged\n',
             GzipFile(mode='rb', fileobj=BytesIO(delta_data)).read())
 
     def test_deannotation(self):
@@ -1261 +1265 @@
         # we need a full text, and a delta
         f = self.get_knit()
         get_diamond_files(f, 1)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTAnnotatedToUnannotated(None),
-            _mod_knit.DeltaAnnotatedToUnannotated(None))
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'knit-ft-gz', _mod_knit.FTAnnotatedToUnannotated(None),
+            'knit-delta-gz', _mod_knit.DeltaAnnotatedToUnannotated(None))
         self.assertEqual(
-            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
-            'origin\n'
-            'end origin\n',
+            b'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
+            b'origin\n'
+            b'end origin\n',
             GzipFile(mode='rb', fileobj=BytesIO(ft_data)).read())
         self.assertEqual(
-            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
-            '2,2,2\nright\nmerged\nend merged\n',
+            b'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
+            b'2,2,2\nright\nmerged\nend merged\n',
             GzipFile(mode='rb', fileobj=BytesIO(delta_data)).read())
 
     def test_annotated_to_fulltext_no_eol(self):
@@ -1282 +1286 @@
         # Reconstructing a full text requires a backing versioned file, and it
         # must have the base lines requested from it.
         logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTAnnotatedToFullText(None),
-            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
-        self.assertEqual('origin', ft_data)
-        self.assertEqual('base\nleft\nright\nmerged', delta_data)
-        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
-            True)], logged_vf.calls)
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'fulltext', _mod_knit.FTAnnotatedToFullText(None),
+            'fulltext', _mod_knit.DeltaAnnotatedToFullText(logged_vf))
+        self.assertEqual(b'origin', ft_data)
+        self.assertEqual(b'base\nleft\nright\nmerged', delta_data)
+        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
+                           True)], logged_vf.calls)
 
     def test_annotated_to_fulltext(self):
         """Test adapting annotated knits to full texts (for -> weaves)."""
@@ -1298 +1302 @@
         # Reconstructing a full text requires a backing versioned file, and it
         # must have the base lines requested from it.
         logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTAnnotatedToFullText(None),
-            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
-        self.assertEqual('origin\n', ft_data)
-        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
-        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
-            True)], logged_vf.calls)
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'fulltext', _mod_knit.FTAnnotatedToFullText(None),
+            'fulltext', _mod_knit.DeltaAnnotatedToFullText(logged_vf))
+        self.assertEqual(b'origin\n', ft_data)
+        self.assertEqual(b'base\nleft\nright\nmerged\n', delta_data)
+        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
+                           True)], logged_vf.calls)
 
     def test_unannotated_to_fulltext(self):
         """Test adapting unannotated knits to full texts.
@@ -1317 +1321 @@
         # Reconstructing a full text requires a backing versioned file, and it
         # must have the base lines requested from it.
         logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTPlainToFullText(None),
-            _mod_knit.DeltaPlainToFullText(logged_vf))
-        self.assertEqual('origin\n', ft_data)
-        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
-        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
-            True)], logged_vf.calls)
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'fulltext', _mod_knit.FTPlainToFullText(None),
+            'fulltext', _mod_knit.DeltaPlainToFullText(logged_vf))
+        self.assertEqual(b'origin\n', ft_data)
+        self.assertEqual(b'base\nleft\nright\nmerged\n', delta_data)
+        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
+                           True)], logged_vf.calls)
 
     def test_unannotated_to_fulltext_no_eol(self):
         """Test adapting unannotated knits to full texts.
@@ -1336 +1340 @@
         # Reconstructing a full text requires a backing versioned file, and it
         # must have the base lines requested from it.
         logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTPlainToFullText(None),
-            _mod_knit.DeltaPlainToFullText(logged_vf))
-        self.assertEqual('origin', ft_data)
-        self.assertEqual('base\nleft\nright\nmerged', delta_data)
-        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
-            True)], logged_vf.calls)
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'fulltext', _mod_knit.FTPlainToFullText(None),
+            'fulltext', _mod_knit.DeltaPlainToFullText(logged_vf))
+        self.assertEqual(b'origin', ft_data)
+        self.assertEqual(b'base\nleft\nright\nmerged', delta_data)
+        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
+                           True)], logged_vf.calls)
 
 
 class TestKeyMapper(TestCaseWithMemoryTransport):
@@ -1350 +1354 @@
 
     def test_identity_mapper(self):
         mapper = versionedfile.ConstantMapper("inventory")
-        self.assertEqual("inventory", mapper.map(('foo@ar',)))
-        self.assertEqual("inventory", mapper.map(('quux',)))
+        self.assertEqual("inventory", mapper.map((b'foo@ar',)))
+        self.assertEqual("inventory", mapper.map((b'quux',)))
 
     def test_prefix_mapper(self):
         #format5: plain
         mapper = versionedfile.PrefixMapper()
-        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
-        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
-        self.assertEqual(('file-id',), mapper.unmap("file-id"))
-        self.assertEqual(('new-id',), mapper.unmap("new-id"))
+        self.assertEqual("file-id", mapper.map((b"file-id", b"revision-id")))
+        self.assertEqual("new-id", mapper.map((b"new-id", b"revision-id")))
+        self.assertEqual((b'file-id',), mapper.unmap("file-id"))
+        self.assertEqual((b'new-id',), mapper.unmap("new-id"))
 
     def test_hash_prefix_mapper(self):
         #format6: hash + plain
         mapper = versionedfile.HashPrefixMapper()
-        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
-        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
-        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
-        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))
+        self.assertEqual(
+            "9b/file-id", mapper.map((b"file-id", b"revision-id")))
+        self.assertEqual("45/new-id", mapper.map((b"new-id", b"revision-id")))
+        self.assertEqual((b'file-id',), mapper.unmap("9b/file-id"))
+        self.assertEqual((b'new-id',), mapper.unmap("45/new-id"))
 
     def test_hash_escaped_mapper(self):
         #knit1: hash + escaped
         mapper = versionedfile.HashEscapedPrefixMapper()
-        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
-        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
-            "revision-id")))
-        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
-            "revision-id")))
-        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
-        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
+        self.assertEqual("88/%2520", mapper.map((b" ", b"revision-id")))
+        self.assertEqual("ed/fil%2545-%2549d", mapper.map((b"filE-Id",
+                                                           b"revision-id")))
+        self.assertEqual("88/ne%2557-%2549d", mapper.map((b"neW-Id",
+                                                          b"revision-id")))
+        self.assertEqual((b'filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
+        self.assertEqual((b'neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
 
 
 class TestVersionedFiles(TestCaseWithMemoryTransport):
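The mapper tests above pin down a round-trip contract: map() takes a key tuple (now bytes) and returns a transport-relative path as text, while unmap() recovers the key prefix as a bytes tuple. A self-contained sketch of that contract for the prefix case (stand-in functions, not breezy's PrefixMapper implementation):

    def prefix_map(key):
        # the first key element names the file; storage paths stay text
        return key[0].decode('utf-8')

    def prefix_unmap(partition_id):
        # reverse direction: path component back to a bytes key prefix
        return (partition_id.encode('utf-8'),)

    assert prefix_map((b'file-id', b'revision-id')) == 'file-id'
    assert prefix_unmap('file-id') == (b'file-id',)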
@@ -1396 +1401 @@
     # plain text knits in packs (texts)
     len_one_scenarios = [
         ('weave-named', {
-            'cleanup':None,
-            'factory':make_versioned_files_factory(WeaveFile,
-                ConstantMapper('inventory')),
-            'graph':True,
-            'key_length':1,
+            'cleanup': None,
+            'factory': make_versioned_files_factory(WeaveFile,
+                                                    ConstantMapper('inventory')),
+            'graph': True,
+            'key_length': 1,
             'support_partial_insertion': False,
             }),
         ('named-knit', {
-            'cleanup':None,
-            'factory':make_file_factory(False, ConstantMapper('revisions')),
-            'graph':True,
-            'key_length':1,
+            'cleanup': None,
+            'factory': make_file_factory(False, ConstantMapper('revisions')),
+            'graph': True,
+            'key_length': 1,
             'support_partial_insertion': False,
             }),
         ('named-nograph-nodelta-knit-pack', {
-            'cleanup':cleanup_pack_knit,
-            'factory':make_pack_factory(False, False, 1),
-            'graph':False,
-            'key_length':1,
+            'cleanup': cleanup_pack_knit,
+            'factory': make_pack_factory(False, False, 1),
+            'graph': False,
+            'key_length': 1,
             'support_partial_insertion': False,
             }),
         ('named-graph-knit-pack', {
-            'cleanup':cleanup_pack_knit,
-            'factory':make_pack_factory(True, True, 1),
-            'graph':True,
-            'key_length':1,
+            'cleanup': cleanup_pack_knit,
+            'factory': make_pack_factory(True, True, 1),
+            'graph': True,
+            'key_length': 1,
             'support_partial_insertion': True,
             }),
         ('named-graph-nodelta-knit-pack', {
-            'cleanup':cleanup_pack_knit,
-            'factory':make_pack_factory(True, False, 1),
-            'graph':True,
-            'key_length':1,
+            'cleanup': cleanup_pack_knit,
+            'factory': make_pack_factory(True, False, 1),
+            'graph': True,
+            'key_length': 1,
             'support_partial_insertion': False,
             }),
         ('groupcompress-nograph', {
-            'cleanup':groupcompress.cleanup_pack_group,
-            'factory':groupcompress.make_pack_factory(False, False, 1),
+            'cleanup': groupcompress.cleanup_pack_group,
+            'factory': groupcompress.make_pack_factory(False, False, 1),
             'graph': False,
-            'key_length':1,
-            'support_partial_insertion':False,
+            'key_length': 1,
+            'support_partial_insertion': False,
             }),
         ]
     len_two_scenarios = [
         ('weave-prefix', {
-            'cleanup':None,
-            'factory':make_versioned_files_factory(WeaveFile,
-                PrefixMapper()),
-            'graph':True,
-            'key_length':2,
+            'cleanup': None,
+            'factory': make_versioned_files_factory(WeaveFile,
+                                                    PrefixMapper()),
+            'graph': True,
+            'key_length': 2,
             'support_partial_insertion': False,
             }),
         ('annotated-knit-escape', {
-            'cleanup':None,
-            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
-            'graph':True,
-            'key_length':2,
+            'cleanup': None,
+            'factory': make_file_factory(True, HashEscapedPrefixMapper()),
+            'graph': True,
+            'key_length': 2,
             'support_partial_insertion': False,
             }),
         ('plain-knit-pack', {
-            'cleanup':cleanup_pack_knit,
-            'factory':make_pack_factory(True, True, 2),
-            'graph':True,
-            'key_length':2,
+            'cleanup': cleanup_pack_knit,
+            'factory': make_pack_factory(True, True, 2),
+            'graph': True,
+            'key_length': 2,
             'support_partial_insertion': True,
             }),
         ('groupcompress', {
-            'cleanup':groupcompress.cleanup_pack_group,
-            'factory':groupcompress.make_pack_factory(True, False, 1),
+            'cleanup': groupcompress.cleanup_pack_group,
+            'factory': groupcompress.make_pack_factory(True, False, 1),
             'graph': True,
-            'key_length':1,
-            'support_partial_insertion':False,
+            'key_length': 1,
+            'support_partial_insertion': False,
             }),
         ]
 
@@ -1487 +1492 @@
         if self.key_length == 1:
             return (suffix,)
         else:
-            return ('FileA',) + (suffix,)
+            return (b'FileA',) + (suffix,)
 
     def test_add_fallback_implies_without_fallbacks(self):
         f = self.get_versionedfiles('files')
@@ -1495 +1500 @@
             raise TestNotApplicable("%s doesn't support fallbacks"
                                     % (f.__class__.__name__,))
         g = self.get_versionedfiles('fallback')
-        key_a = self.get_simple_key('a')
-        g.add_lines(key_a, [], ['\n'])
+        key_a = self.get_simple_key(b'a')
+        g.add_lines(key_a, [], [b'\n'])
         f.add_fallback_versioned_files(g)
         self.assertTrue(key_a in f.get_parent_map([key_a]))
-        self.assertFalse(key_a in f.without_fallbacks().get_parent_map([key_a]))
+        self.assertFalse(
+            key_a in f.without_fallbacks().get_parent_map([key_a]))
 
     def test_add_lines(self):
         f = self.get_versionedfiles()
-        key0 = self.get_simple_key('r0')
-        key1 = self.get_simple_key('r1')
-        key2 = self.get_simple_key('r2')
-        keyf = self.get_simple_key('foo')
-        f.add_lines(key0, [], ['a\n', 'b\n'])
+        key0 = self.get_simple_key(b'r0')
+        key1 = self.get_simple_key(b'r1')
+        key2 = self.get_simple_key(b'r2')
+        keyf = self.get_simple_key(b'foo')
+        f.add_lines(key0, [], [b'a\n', b'b\n'])
         if self.graph:
-            f.add_lines(key1, [key0], ['b\n', 'c\n'])
+            f.add_lines(key1, [key0], [b'b\n', b'c\n'])
         else:
-            f.add_lines(key1, [], ['b\n', 'c\n'])
+            f.add_lines(key1, [], [b'b\n', b'c\n'])
         keys = f.keys()
         self.assertTrue(key0 in keys)
         self.assertTrue(key1 in keys)
@@ -1519 +1525 @@
         for record in f.get_record_stream([key0, key1], 'unordered', True):
             records.append((record.key, record.get_bytes_as('fulltext')))
         records.sort()
-        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)
+        self.assertEqual([(key0, b'a\nb\n'), (key1, b'b\nc\n')], records)
 
-    def test__add_text(self):
+    def test_add_chunks(self):
         f = self.get_versionedfiles()
-        key0 = self.get_simple_key('r0')
-        key1 = self.get_simple_key('r1')
-        key2 = self.get_simple_key('r2')
-        keyf = self.get_simple_key('foo')
-        f._add_text(key0, [], 'a\nb\n')
+        key0 = self.get_simple_key(b'r0')
+        key1 = self.get_simple_key(b'r1')
+        key2 = self.get_simple_key(b'r2')
+        keyf = self.get_simple_key(b'foo')
+        def add_chunks(key, parents, chunks):
+            factory = ChunkedContentFactory(
+                key, parents, osutils.sha_strings(chunks), chunks)
+            return f.add_content(factory)
+
+        add_chunks(key0, [], [b'a', b'\nb\n'])
         if self.graph:
-            f._add_text(key1, [key0], 'b\nc\n')
+            add_chunks(key1, [key0], [b'b', b'\n', b'c\n'])
         else:
-            f._add_text(key1, [], 'b\nc\n')
+            add_chunks(key1, [], [b'b\n', b'c\n'])
         keys = f.keys()
-        self.assertTrue(key0 in keys)
-        self.assertTrue(key1 in keys)
+        self.assertIn(key0, keys)
+        self.assertIn(key1, keys)
         records = []
         for record in f.get_record_stream([key0, key1], 'unordered', True):
             records.append((record.key, record.get_bytes_as('fulltext')))
         records.sort()
-        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)
+        self.assertEqual([(key0, b'a\nb\n'), (key1, b'b\nc\n')], records)
 
     def test_annotate(self):
         files = self.get_versionedfiles()
@@ -1547 +1558 @@
         if self.key_length == 1:
             prefix = ()
         else:
-            prefix = ('FileA',)
+            prefix = (b'FileA',)
         # introduced full text
-        origins = files.annotate(prefix + ('origin',))
+        origins = files.annotate(prefix + (b'origin',))
         self.assertEqual([
-            (prefix + ('origin',), 'origin\n')],
+            (prefix + (b'origin',), b'origin\n')],
             origins)
         # a delta
-        origins = files.annotate(prefix + ('base',))
+        origins = files.annotate(prefix + (b'base',))
         self.assertEqual([
-            (prefix + ('base',), 'base\n')],
+            (prefix + (b'base',), b'base\n')],
             origins)
         # a merge
-        origins = files.annotate(prefix + ('merged',))
+        origins = files.annotate(prefix + (b'merged',))
         if self.graph:
             self.assertEqual([
-                (prefix + ('base',), 'base\n'),
-                (prefix + ('left',), 'left\n'),
-                (prefix + ('right',), 'right\n'),
-                (prefix + ('merged',), 'merged\n')
+                (prefix + (b'base',), b'base\n'),
+                (prefix + (b'left',), b'left\n'),
+                (prefix + (b'right',), b'right\n'),
+                (prefix + (b'merged',), b'merged\n')
                 ],
                 origins)
         else:
             # Without a graph everything is new.
             self.assertEqual([
-                (prefix + ('merged',), 'base\n'),
-                (prefix + ('merged',), 'left\n'),
-                (prefix + ('merged',), 'right\n'),
-                (prefix + ('merged',), 'merged\n')
+                (prefix + (b'merged',), b'base\n'),
+                (prefix + (b'merged',), b'left\n'),
+                (prefix + (b'merged',), b'right\n'),
+                (prefix + (b'merged',), b'merged\n')
                 ],
                 origins)
         self.assertRaises(RevisionNotPresent,
-            files.annotate, prefix + ('missing-key',))
+                          files.annotate, prefix + ('missing-key',))
 
     def test_check_no_parameters(self):
         files = self.get_versionedfiles()
@@ -1598 +1609 @@
         seen = set()
         # Texts output should be fulltexts.
         self.capture_stream(files, entries, seen.add,
-            files.get_parent_map(keys), require_fulltext=True)
+                            files.get_parent_map(keys), require_fulltext=True)
         # All texts should be output.
         self.assertEqual(set(keys), seen)
 
@@ -1611 +1622 @@
         files = self.get_versionedfiles()
 
     def get_diamond_files(self, files, trailing_eol=True, left_only=False,
-        nokeys=False):
+                          nokeys=False):
         return get_diamond_files(files, self.key_length,
-            trailing_eol=trailing_eol, nograph=not self.graph,
-            left_only=left_only, nokeys=nokeys)
+                                 trailing_eol=trailing_eol, nograph=not self.graph,
+                                 left_only=left_only, nokeys=nokeys)
 
     def _add_content_nostoresha(self, add_lines):
         """When nostore_sha is supplied using old content raises."""
         vf = self.get_versionedfiles()
-        empty_text = ('a', [])
-        sample_text_nl = ('b', ["foo\n", "bar\n"])
-        sample_text_no_nl = ('c', ["foo\n", "bar"])
+        empty_text = (b'a', [])
+        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
+        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
         shas = []
         for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
             if add_lines:
                 sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                                          lines)
             else:
-                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
-                                         ''.join(lines))
+                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
+                                         lines)
             shas.append(sha)
         # we now have a copy of all the lines in the vf.
         for sha, (version, lines) in zip(
-            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
-            new_key = self.get_simple_key(version + "2")
-            self.assertRaises(errors.ExistingContent,
-                vf.add_lines, new_key, [], lines,
-                nostore_sha=sha)
-            self.assertRaises(errors.ExistingContent,
-                vf._add_text, new_key, [], ''.join(lines),
-                nostore_sha=sha)
+                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
+            new_key = self.get_simple_key(version + b"2")
+            self.assertRaises(ExistingContent,
+                              vf.add_lines, new_key, [], lines,
+                              nostore_sha=sha)
+            self.assertRaises(ExistingContent,
+                              vf.add_lines, new_key, [], lines,
+                              nostore_sha=sha)
             # and no new version should have been added.
             record = next(vf.get_record_stream([new_key], 'unordered', True))
             self.assertEqual('absent', record.storage_kind)
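_add_content_nostoresha above exercises the nostore_sha contract: when the supplied SHA-1 matches content that is already stored, add_lines must raise ExistingContent instead of creating a new version. A toy, self-contained model of that behaviour (illustrative names only, not the breezy classes):

    import hashlib

    class ExistingContent(Exception):
        pass  # stands in for breezy's ExistingContent

    _store = {}

    def add_lines(key, lines, nostore_sha=None):
        # sha of the joined content, as a bytes hexdigest like the real API
        sha = hashlib.sha1(b''.join(lines)).hexdigest().encode('ascii')
        if nostore_sha is not None and sha == nostore_sha:
            raise ExistingContent(sha)  # duplicate: store nothing
        _store[key] = lines
        return sha

    sha = add_lines(b'a', [b'foo\n'])
    try:
        add_lines(b'a2', [b'foo\n'], nostore_sha=sha)
    except ExistingContent:
        pass  # content already present; no new version added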
1648
    def test_add_lines_nostoresha(self):
1660
1649
        self._add_content_nostoresha(add_lines=True)
1661
1650
 
 
1651
    def test__add_text_nostoresha(self):
 
1652
        self._add_content_nostoresha(add_lines=False)
 
1653
 
1662
1654
    def test_add_lines_return(self):
1663
1655
        files = self.get_versionedfiles()
1664
1656
        # save code by using the stock data insertion helper.
1670
1662
            results.append(add[:2])
1671
1663
        if self.key_length == 1:
1672
1664
            self.assertEqual([
1673
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1674
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1675
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1676
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1677
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1665
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1666
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1667
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1668
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1669
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1678
1670
                results)
1679
1671
        elif self.key_length == 2:
1680
1672
            self.assertEqual([
1681
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1682
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1683
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1684
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1685
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1686
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1687
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1688
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1689
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
1690
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1673
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1674
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1675
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1676
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1677
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1678
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1679
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1680
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1681
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
 
1682
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1691
1683
                results)
1692
1684
 
1693
1685
    def test_add_lines_no_key_generates_chk_key(self):
1701
1693
            results.append(add[:2])
1702
1694
        if self.key_length == 1:
1703
1695
            self.assertEqual([
1704
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1705
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1706
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1707
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1708
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1696
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1697
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1698
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1699
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1700
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1709
1701
                results)
1710
1702
            # Check the added items got CHK keys.
1711
1703
            self.assertEqual({
1712
 
                (b'sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
1713
 
                (b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
1714
 
                (b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
1715
 
                (b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
1716
 
                (b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
 
1704
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
 
1705
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
 
1706
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
 
1707
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
 
1708
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
1717
1709
                },
1718
1710
                files.keys())
1719
1711
        elif self.key_length == 2:
1720
1712
            self.assertEqual([
1721
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1722
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1723
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1724
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1725
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1726
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1727
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1728
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1729
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
1730
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1713
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1714
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1715
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1716
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1717
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1718
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1719
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1720
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1721
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
 
1722
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1731
1723
                results)
1732
1724
            # Check the added items got CHK keys.
1733
1725
            self.assertEqual({
1734
 
                (b'FileA', b'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
1735
 
                (b'FileA', b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
1736
 
                (b'FileA', b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
1737
 
                (b'FileA', b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
1738
 
                (b'FileA', b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
1739
 
                (b'FileB', b'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
1740
 
                (b'FileB', b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
1741
 
                (b'FileB', b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
1742
 
                (b'FileB', b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
1743
 
                (b'FileB', b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
 
1726
                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
 
1727
                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
 
1728
                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
 
1729
                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
 
1730
                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
 
1731
                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
 
1732
                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
 
1733
                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
 
1734
                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
 
1735
                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
1744
1736
                },
1745
1737
                files.keys())
1746
1738
 
1747
1739
    def test_empty_lines(self):
1748
1740
        """Empty files can be stored."""
1749
1741
        f = self.get_versionedfiles()
1750
 
        key_a = self.get_simple_key(b'a')
 
1742
        key_a = self.get_simple_key('a')
1751
1743
        f.add_lines(key_a, [], [])
1752
 
        self.assertEqual(b'',
1753
 
                         next(f.get_record_stream([key_a], 'unordered', True
1754
 
                                                  )).get_bytes_as('fulltext'))
1755
 
        key_b = self.get_simple_key(b'b')
 
1744
        self.assertEqual('',
 
1745
            f.get_record_stream([key_a], 'unordered', True
 
1746
                ).next().get_bytes_as('fulltext'))
 
1747
        key_b = self.get_simple_key('b')
1756
1748
        f.add_lines(key_b, self.get_parents([key_a]), [])
1757
 
        self.assertEqual(b'',
1758
 
                         next(f.get_record_stream([key_b], 'unordered', True
1759
 
                                                  )).get_bytes_as('fulltext'))
 
1749
        self.assertEqual('',
 
1750
            f.get_record_stream([key_b], 'unordered', True
 
1751
                ).next().get_bytes_as('fulltext'))
1760
1752
 
1761
1753
    def test_newline_only(self):
1762
1754
        f = self.get_versionedfiles()
1763
 
        key_a = self.get_simple_key(b'a')
1764
 
        f.add_lines(key_a, [], [b'\n'])
1765
 
        self.assertEqual(b'\n',
1766
 
                         next(f.get_record_stream([key_a], 'unordered', True
1767
 
                                                  )).get_bytes_as('fulltext'))
1768
 
        key_b = self.get_simple_key(b'b')
1769
 
        f.add_lines(key_b, self.get_parents([key_a]), [b'\n'])
1770
 
        self.assertEqual(b'\n',
1771
 
                         next(f.get_record_stream([key_b], 'unordered', True
1772
 
                                                  )).get_bytes_as('fulltext'))
 
1755
        key_a = self.get_simple_key('a')
 
1756
        f.add_lines(key_a, [], ['\n'])
 
1757
        self.assertEqual('\n',
 
1758
            f.get_record_stream([key_a], 'unordered', True
 
1759
                ).next().get_bytes_as('fulltext'))
 
1760
        key_b = self.get_simple_key('b')
 
1761
        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
 
1762
        self.assertEqual('\n',
 
1763
            f.get_record_stream([key_b], 'unordered', True
 
1764
                ).next().get_bytes_as('fulltext'))
1773
1765
 
1774
1766
    def test_get_known_graph_ancestry(self):
1775
1767
        f = self.get_versionedfiles()
1776
1768
        if not self.graph:
1777
1769
            raise TestNotApplicable('ancestry info only relevant with graph.')
1778
 
        key_a = self.get_simple_key(b'a')
1779
 
        key_b = self.get_simple_key(b'b')
1780
 
        key_c = self.get_simple_key(b'c')
 
1770
        key_a = self.get_simple_key('a')
 
1771
        key_b = self.get_simple_key('b')
 
1772
        key_c = self.get_simple_key('c')
1781
1773
        # A
1782
1774
        # |\
1783
1775
        # | B
1784
1776
        # |/
1785
1777
        # C
1786
 
        f.add_lines(key_a, [], [b'\n'])
1787
 
        f.add_lines(key_b, [key_a], [b'\n'])
1788
 
        f.add_lines(key_c, [key_a, key_b], [b'\n'])
 
1778
        f.add_lines(key_a, [], ['\n'])
 
1779
        f.add_lines(key_b, [key_a], ['\n'])
 
1780
        f.add_lines(key_c, [key_a, key_b], ['\n'])
1789
1781
        kg = f.get_known_graph_ancestry([key_c])
1790
1782
        self.assertIsInstance(kg, _mod_graph.KnownGraph)
1791
1783
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
1797
1789
        if getattr(f, 'add_fallback_versioned_files', None) is None:
1798
1790
            raise TestNotApplicable("%s doesn't support fallbacks"
1799
1791
                                    % (f.__class__.__name__,))
1800
 
        key_a = self.get_simple_key(b'a')
1801
 
        key_b = self.get_simple_key(b'b')
1802
 
        key_c = self.get_simple_key(b'c')
 
1792
        key_a = self.get_simple_key('a')
 
1793
        key_b = self.get_simple_key('b')
 
1794
        key_c = self.get_simple_key('c')
1803
1795
        # A     only in fallback
1804
1796
        # |\
1805
1797
        # | B
1806
1798
        # |/
1807
1799
        # C
1808
1800
        g = self.get_versionedfiles('fallback')
1809
 
        g.add_lines(key_a, [], [b'\n'])
 
1801
        g.add_lines(key_a, [], ['\n'])
1810
1802
        f.add_fallback_versioned_files(g)
1811
 
        f.add_lines(key_b, [key_a], [b'\n'])
1812
 
        f.add_lines(key_c, [key_a, key_b], [b'\n'])
 
1803
        f.add_lines(key_b, [key_a], ['\n'])
 
1804
        f.add_lines(key_c, [key_a, key_b], ['\n'])
1813
1805
        kg = f.get_known_graph_ancestry([key_c])
1814
1806
        self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
1815
1807
 
1822
1814
    def assertValidStorageKind(self, storage_kind):
1823
1815
        """Assert that storage_kind is a valid storage_kind."""
1824
1816
        self.assertSubset([storage_kind],
1825
 
                          ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
1826
 
                           'knit-ft', 'knit-delta', 'chunked', 'fulltext',
1827
 
                           'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
1828
 
                           'knit-delta-gz',
1829
 
                           'knit-delta-closure', 'knit-delta-closure-ref',
1830
 
                           'groupcompress-block', 'groupcompress-block-ref'])
 
1817
            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
 
1818
             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
 
1819
             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
 
1820
             'knit-delta-gz',
 
1821
             'knit-delta-closure', 'knit-delta-closure-ref',
 
1822
             'groupcompress-block', 'groupcompress-block-ref'])
1831
1823
 
1832
1824
    def capture_stream(self, f, entries, on_seen, parents,
1833
 
                       require_fulltext=False):
 
1825
        require_fulltext=False):
1834
1826
        """Capture a stream for testing."""
1835
1827
        for factory in entries:
1836
1828
            on_seen(factory.key)
1837
1829
            self.assertValidStorageKind(factory.storage_kind)
1838
1830
            if factory.sha1 is not None:
1839
1831
                self.assertEqual(f.get_sha1s([factory.key])[factory.key],
1840
 
                                 factory.sha1)
 
1832
                    factory.sha1)
1841
1833
            self.assertEqual(parents[factory.key], factory.parents)
1842
1834
            self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
1843
 
                                  bytes)
 
1835
                str)
1844
1836
            if require_fulltext:
1845
1837
                factory.get_bytes_as('fulltext')
1846
1838
 
     def get_keys_and_sort_order(self):
         """Get diamond test keys list, and their sort ordering."""
         if self.key_length == 1:
-            keys = [('merged',), ('left',), ('right',), ('base',)]
-            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
+            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
+            sort_order = {(b'merged',): 2, (b'left',): 1,
+                          (b'right',): 1, (b'base',): 0}
         else:
             keys = [
-                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
-                ('FileA', 'base'),
-                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
-                ('FileB', 'base'),
+                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
+                (b'FileA', b'base'),
+                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
+                (b'FileB', b'base'),
                 ]
             sort_order = {
-                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
-                ('FileA', 'base'):0,
-                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
-                ('FileB', 'base'):0,
+                (b'FileA', b'merged'): 2, (b'FileA', b'left'): 1, (b'FileA', b'right'): 1,
+                (b'FileA', b'base'): 0,
+                (b'FileB', b'merged'): 2, (b'FileB', b'left'): 1, (b'FileB', b'right'): 1,
+                (b'FileB', b'base'): 0,
                 }
         return keys, sort_order
     def get_keys_and_groupcompress_sort_order(self):
         """Get diamond test keys list, and their groupcompress sort ordering."""
         if self.key_length == 1:
-            keys = [('merged',), ('left',), ('right',), ('base',)]
-            sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
+            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
+            sort_order = {(b'merged',): 0, (b'left',): 1,
+                          (b'right',): 1, (b'base',): 2}
         else:
             keys = [
-                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
-                ('FileA', 'base'),
-                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
-                ('FileB', 'base'),
+                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
+                (b'FileA', b'base'),
+                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
+                (b'FileB', b'base'),
                 ]
             sort_order = {
-                ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
-                ('FileA', 'base'):2,
-                ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
-                ('FileB', 'base'):5,
+                (b'FileA', b'merged'): 0, (b'FileA', b'left'): 1, (b'FileA', b'right'): 1,
+                (b'FileA', b'base'): 2,
+                (b'FileB', b'merged'): 3, (b'FileB', b'left'): 4, (b'FileB', b'right'): 4,
+                (b'FileB', b'base'): 5,
                 }
         return keys, sort_order
             seen.append(factory.key)
             self.assertValidStorageKind(factory.storage_kind)
             self.assertSubset([factory.sha1],
-                [None, files.get_sha1s([factory.key])[factory.key]])
+                              [None, files.get_sha1s([factory.key])[factory.key]])
             self.assertEqual(parent_map[factory.key], factory.parents)
             # self.assertEqual(files.get_text(factory.key),
             ft_bytes = factory.get_bytes_as('fulltext')
-            self.assertIsInstance(ft_bytes, str)
+            self.assertIsInstance(ft_bytes, bytes)
             chunked_bytes = factory.get_bytes_as('chunked')
-            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))
+            self.assertEqualDiff(ft_bytes, b''.join(chunked_bytes))
+            chunked_bytes = factory.iter_bytes_as('chunked')
+            self.assertEqualDiff(ft_bytes, b''.join(chunked_bytes))

         self.assertStreamOrder(sort_order, seen, keys)
     def assertStreamOrder(self, sort_order, seen, keys):
         self.assertEqual(len(set(seen)), len(keys))
         if self.key_length == 1:
-            lows = {():0}
+            lows = {(): 0}
         else:
-            lows = {('FileA',):0, ('FileB',):0}
+            lows = {(b'FileA',): 0, (b'FileB',): 0}
         if not self.graph:
             self.assertEqual(set(keys), set(seen))
         else:
             for key in seen:
                 sort_pos = sort_order[key]
                 self.assertTrue(sort_pos >= lows[key[:-1]],
-                    "Out of order in sorted stream: %r, %r" % (key, seen))
+                                "Out of order in sorted stream: %r, %r" % (key, seen))
                 lows[key[:-1]] = sort_pos
     def test_get_record_stream_unknown_storage_kind_raises(self):

         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [('merged',), ('left',), ('right',), ('base',)]
+            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
         else:
             keys = [
-                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
-                ('FileA', 'base'),
-                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
-                ('FileB', 'base'),
+                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
+                (b'FileA', b'base'),
+                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
+                (b'FileB', b'base'),
                 ]
         parent_map = files.get_parent_map(keys)
         entries = files.get_record_stream(keys, 'unordered', False)
                                  factory.sha1)
             self.assertEqual(parent_map[factory.key], factory.parents)
             # currently no stream emits mpdiff
-            self.assertRaises(errors.UnavailableRepresentation,
-                factory.get_bytes_as, 'mpdiff')
+            self.assertRaises(UnavailableRepresentation,
+                              factory.get_bytes_as, 'mpdiff')
             self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
-                str)
+                                  bytes)
         self.assertEqual(set(keys), seen)
     def test_get_record_stream_missing_records_are_absent(self):
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
+            keys = [(b'merged',), (b'left',), (b'right',),
+                    (b'absent',), (b'base',)]
         else:
             keys = [
-                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
-                ('FileA', 'absent'), ('FileA', 'base'),
-                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
-                ('FileB', 'absent'), ('FileB', 'base'),
-                ('absent', 'absent'),
+                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
+                (b'FileA', b'absent'), (b'FileA', b'base'),
+                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
+                (b'FileB', b'absent'), (b'FileB', b'base'),
+                (b'absent', b'absent'),
                 ]
         parent_map = files.get_parent_map(keys)
         entries = files.get_record_stream(keys, 'unordered', False)
     def assertRecordHasContent(self, record, bytes):
         """Assert that record has the bytes bytes."""
         self.assertEqual(bytes, record.get_bytes_as('fulltext'))
-        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))
+        self.assertEqual(bytes, b''.join(record.get_bytes_as('chunked')))
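The helper above encodes the equivalence that the 'chunked' storage kind promises; in sketch form, for any non-absent record:

    # joining the chunked pieces must reproduce the fulltext bytes
    full = record.get_bytes_as('fulltext')
    assert full == b''.join(record.get_bytes_as('chunked'))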
     def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
         files = self.get_versionedfiles()
-        key = self.get_simple_key('foo')
-        files.add_lines(key, (), ['my text\n', 'content'])
+        key = self.get_simple_key(b'foo')
+        files.add_lines(key, (), [b'my text\n', b'content'])
         stream = files.get_record_stream([key], 'unordered', False)
         record = next(stream)
         if record.storage_kind in ('chunked', 'fulltext'):
             # chunked and fulltext representations are for direct use not wire
             # serialisation: check they are able to be used directly. To send
             # such records over the wire translation will be needed.
-            self.assertRecordHasContent(record, "my text\ncontent")
+            self.assertRecordHasContent(record, b"my text\ncontent")
         else:
             bytes = [record.get_bytes_as(record.storage_kind)]
             network_stream = versionedfile.NetworkRecordStream(bytes).read()
             for record in network_stream:
                 records.append(record)
                 self.assertEqual(source_record.storage_kind,
-                    record.storage_kind)
+                                 record.storage_kind)
                 self.assertEqual(source_record.parents, record.parents)
                 self.assertEqual(
                     source_record.get_bytes_as(source_record.storage_kind),
             yield record

     def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
-        stream):
+                                        stream):
         """Convert a stream to a bytes iterator.

         :param skipped_records: A list with one element to increment when a
     def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
         files = self.get_versionedfiles()
         target_files = self.get_versionedfiles('target')
-        key = self.get_simple_key('ft')
-        key_delta = self.get_simple_key('delta')
-        files.add_lines(key, (), ['my text\n', 'content'])
+        key = self.get_simple_key(b'ft')
+        key_delta = self.get_simple_key(b'delta')
+        files.add_lines(key, (), [b'my text\n', b'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+        files.add_lines(key_delta, delta_parents, [
+                        b'different\n', b'content\n'])
         local = files.get_record_stream([key, key_delta], 'unordered', False)
         ref = files.get_record_stream([key, key_delta], 'unordered', False)
         skipped_records = [0]
         full_texts = {
-            key: "my text\ncontent",
-            key_delta: "different\ncontent\n",
+            key: b"my text\ncontent",
+            key_delta: b"different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
         # copy a delta over the wire
         files = self.get_versionedfiles()
         target_files = self.get_versionedfiles('target')
-        key = self.get_simple_key('ft')
-        key_delta = self.get_simple_key('delta')
-        files.add_lines(key, (), ['my text\n', 'content'])
+        key = self.get_simple_key(b'ft')
+        key_delta = self.get_simple_key(b'delta')
+        files.add_lines(key, (), [b'my text\n', b'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+        files.add_lines(key_delta, delta_parents, [
+                        b'different\n', b'content\n'])
         # Copy the basis text across so we can reconstruct the delta during
         # insertion into target.
         target_files.insert_record_stream(files.get_record_stream([key],
-            'unordered', False))
+                                                                  'unordered', False))
         local = files.get_record_stream([key_delta], 'unordered', False)
         ref = files.get_record_stream([key_delta], 'unordered', False)
         skipped_records = [0]
         full_texts = {
-            key_delta: "different\ncontent\n",
+            key_delta: b"different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
     def test_get_record_stream_wire_ready_delta_closure_included(self):
         # copy a delta over the wire with the ability to get its full text.
         files = self.get_versionedfiles()
-        key = self.get_simple_key('ft')
-        key_delta = self.get_simple_key('delta')
-        files.add_lines(key, (), ['my text\n', 'content'])
+        key = self.get_simple_key(b'ft')
+        key_delta = self.get_simple_key(b'delta')
+        files.add_lines(key, (), [b'my text\n', b'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+        files.add_lines(key_delta, delta_parents, [
+                        b'different\n', b'content\n'])
         local = files.get_record_stream([key_delta], 'unordered', True)
         ref = files.get_record_stream([key_delta], 'unordered', True)
         skipped_records = [0]
         full_texts = {
-            key_delta: "different\ncontent\n",
+            key_delta: b"different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
         seen = set()
         for factory in entries:
             seen.add(factory.key)
-            if factory.key[-1] == 'absent':
+            if factory.key[-1] == b'absent':
                 self.assertEqual('absent', factory.storage_kind)
                 self.assertEqual(None, factory.sha1)
                 self.assertEqual(None, factory.parents)

                     self.assertEqual(sha1, factory.sha1)
                 self.assertEqual(parents[factory.key], factory.parents)
                 self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
-                    str)
+                                      bytes)
         self.assertEqual(set(keys), seen)
     def test_filter_absent_records(self):

         # absent keys is still delivered).
         present_keys = list(keys)
         if self.key_length == 1:
-            keys.insert(2, ('extra',))
+            keys.insert(2, (b'extra',))
         else:
-            keys.insert(2, ('extra', 'extra'))
+            keys.insert(2, (b'extra', b'extra'))
         entries = files.get_record_stream(keys, 'unordered', False)
         seen = set()
         self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
-            parent_map)
+                            parent_map)
         self.assertEqual(set(present_keys), seen)

     def get_mapper(self):
     def test_get_annotator(self):
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
-        origin_key = self.get_simple_key('origin')
-        base_key = self.get_simple_key('base')
-        left_key = self.get_simple_key('left')
-        right_key = self.get_simple_key('right')
-        merged_key = self.get_simple_key('merged')
+        origin_key = self.get_simple_key(b'origin')
+        base_key = self.get_simple_key(b'base')
+        left_key = self.get_simple_key(b'left')
+        right_key = self.get_simple_key(b'right')
+        merged_key = self.get_simple_key(b'merged')
         # annotator = files.get_annotator()
         # introduced full text
         origins, lines = files.get_annotator().annotate(origin_key)
         self.assertEqual([(origin_key,)], origins)
-        self.assertEqual(['origin\n'], lines)
+        self.assertEqual([b'origin\n'], lines)
         # a delta
         origins, lines = files.get_annotator().annotate(base_key)
         self.assertEqual([(base_key,)], origins)

                 (merged_key,),
                 ], origins)
         self.assertRaises(RevisionNotPresent,
-            files.get_annotator().annotate, self.get_simple_key('missing-key'))
+                          files.get_annotator().annotate, self.get_simple_key(b'missing-key'))
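The annotator API used here, sketched for the diamond fixture (the key shape follows the scenario's key_length):

    origins, lines = files.get_annotator().annotate((b'merged',))
    # lines holds the byte lines of 'merged'; origins[i] is a tuple of
    # the key(s) whose text introduced lines[i]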
     def test_get_parent_map(self):
         files = self.get_versionedfiles()
         if self.key_length == 1:
             parent_details = [
-                (('r0',), self.get_parents(())),
-                (('r1',), self.get_parents((('r0',),))),
-                (('r2',), self.get_parents(())),
-                (('r3',), self.get_parents(())),
-                (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
+                ((b'r0',), self.get_parents(())),
+                ((b'r1',), self.get_parents(((b'r0',),))),
+                ((b'r2',), self.get_parents(())),
+                ((b'r3',), self.get_parents(())),
+                ((b'm',), self.get_parents(((b'r0',), (b'r1',), (b'r2',), (b'r3',)))),
                 ]
         else:
             parent_details = [
-                (('FileA', 'r0'), self.get_parents(())),
-                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
-                (('FileA', 'r2'), self.get_parents(())),
-                (('FileA', 'r3'), self.get_parents(())),
-                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
-                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
+                ((b'FileA', b'r0'), self.get_parents(())),
+                ((b'FileA', b'r1'), self.get_parents(((b'FileA', b'r0'),))),
+                ((b'FileA', b'r2'), self.get_parents(())),
+                ((b'FileA', b'r3'), self.get_parents(())),
+                ((b'FileA', b'm'), self.get_parents(((b'FileA', b'r0'),
+                                                     (b'FileA', b'r1'), (b'FileA', b'r2'), (b'FileA', b'r3')))),
                 ]
         for key, parents in parent_details:
             files.add_lines(key, parents, [])
             # immediately after adding it should be queryable.
-            self.assertEqual({key:parents}, files.get_parent_map([key]))
+            self.assertEqual({key: parents}, files.get_parent_map([key]))
         # We can ask for an empty set
         self.assertEqual({}, files.get_parent_map([]))
         # We can ask for many keys
         all_parents = dict(parent_details)
         self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
         # Absent keys are just not included in the result.
-        keys = all_parents.keys()
+        keys = list(all_parents.keys())
         if self.key_length == 1:
-            keys.insert(1, ('missing',))
+            keys.insert(1, (b'missing',))
         else:
-            keys.insert(1, ('missing', 'missing'))
+            keys.insert(1, (b'missing', b'missing'))
         # Absent keys are just ignored
         self.assertEqual(all_parents, files.get_parent_map(keys))
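Two behaviours pinned by this test are easy to miss: get_parent_map() is a dict-shaped query that silently drops absent keys, and the new list() wrapper matters because Python 3 dict views have no insert(). A sketch, for a graph-aware scenario:

    pm = files.get_parent_map([(b'r0',), (b'r1',), (b'missing',)])
    # {(b'r0',): (), (b'r1',): ((b'r0',),)} -- no entry for the absent key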
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
+            keys = [(b'base',), (b'origin',), (b'left',),
+                    (b'merged',), (b'right',)]
         else:
             # ask for shas from different prefixes.
             keys = [
-                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
-                ('FileA', 'merged'), ('FileB', 'right'),
+                (b'FileA', b'base'), (b'FileB', b'origin'), (b'FileA', b'left'),
+                (b'FileA', b'merged'), (b'FileB', b'right'),
                 ]
         self.assertEqual({
-            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
-            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
-            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
-            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
-            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
+            keys[0]: b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
+            keys[1]: b'00e364d235126be43292ab09cb4686cf703ddc17',
+            keys[2]: b'a8478686da38e370e32e42e8a0c220e33ee9132f',
+            keys[3]: b'ed8bce375198ea62444dc71952b22cfc2b09226d',
+            keys[4]: b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
             },
             files.get_sha1s(keys))
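get_sha1s() now returns bytes hex digests keyed by key; the values are the SHA-1 of the stored text, so per the expectations above sha1(b'base\n') is the digest paired with the 'base' key. In sketch form:

    sha1s = files.get_sha1s([(b'base',)])
    assert sha1s[(b'base',)] == b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'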
         self.assertEqual(set(actual.keys()), set(expected.keys()))
         actual_parents = actual.get_parent_map(actual.keys())
         if self.graph:
-            self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
+            self.assertEqual(
+                actual_parents, expected.get_parent_map(expected.keys()))
         else:
             for key, parents in actual_parents.items():
                 self.assertEqual(None, parents)
         for key in actual.keys():
-            actual_text = actual.get_record_stream(
-                [key], 'unordered', True).next().get_bytes_as('fulltext')
-            expected_text = expected.get_record_stream(
-                [key], 'unordered', True).next().get_bytes_as('fulltext')
+            actual_text = next(actual.get_record_stream(
+                [key], 'unordered', True)).get_bytes_as('fulltext')
+            expected_text = next(expected.get_record_stream(
+                [key], 'unordered', True)).get_bytes_as('fulltext')
             self.assertEqual(actual_text, expected_text)
     def test_insert_record_stream_fulltexts(self):

             source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
             source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
         source = make_file_factory(True, mapper)(source_transport)
         self.get_diamond_files(source)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
         source = make_file_factory(True, mapper)(source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
         source = make_file_factory(False, mapper)(source_transport)
         self.get_diamond_files(source)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
         source = make_file_factory(False, mapper)(source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
         # insert some keys into f.
         self.get_diamond_files(files, left_only=True)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
         """Inserting a stream with absent keys should raise an error."""
         files = self.get_versionedfiles()
         source = self.get_versionedfiles('source')
-        stream = source.get_record_stream([('missing',) * self.key_length],
-            'topological', False)
+        stream = source.get_record_stream([(b'missing',) * self.key_length],
+                                          'topological', False)
         self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
-            stream)
+                          stream)
     def test_insert_record_stream_out_of_order(self):
         """An out of order stream can either error or work."""

         source = self.get_versionedfiles('source')
         self.get_diamond_files(source)
         if self.key_length == 1:
-            origin_keys = [('origin',)]
-            end_keys = [('merged',), ('left',)]
-            start_keys = [('right',), ('base',)]
+            origin_keys = [(b'origin',)]
+            end_keys = [(b'merged',), (b'left',)]
+            start_keys = [(b'right',), (b'base',)]
         else:
-            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
-            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
-                ('FileB', 'merged',), ('FileB', 'left',)]
-            start_keys = [('FileA', 'right',), ('FileA', 'base',),
-                ('FileB', 'right',), ('FileB', 'base',)]
-        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
+            origin_keys = [(b'FileA', b'origin'), (b'FileB', b'origin')]
+            end_keys = [(b'FileA', b'merged',), (b'FileA', b'left',),
+                        (b'FileB', b'merged',), (b'FileB', b'left',)]
+            start_keys = [(b'FileA', b'right',), (b'FileA', b'base',),
+                          (b'FileB', b'right',), (b'FileB', b'base',)]
+        origin_entries = source.get_record_stream(
+            origin_keys, 'unordered', False)
         end_entries = source.get_record_stream(end_keys, 'topological', False)
-        start_entries = source.get_record_stream(start_keys, 'topological', False)
+        start_entries = source.get_record_stream(
+            start_keys, 'topological', False)
         entries = itertools.chain(origin_entries, end_entries, start_entries)
         try:
             files.insert_record_stream(entries)
         source = self.get_versionedfiles('source')
         parents = ()
         keys = []
-        content = [('same same %d\n' % n) for n in range(500)]
-        for letter in 'abcdefghijklmnopqrstuvwxyz':
-            key = ('key-' + letter,)
+        content = [(b'same same %d\n' % n) for n in range(500)]
+        letters = b'abcdefghijklmnopqrstuvwxyz'
+        for i in range(len(letters)):
+            letter = letters[i:i + 1]
+            key = (b'key-' + letter,)
             if self.key_length == 2:
-                key = ('prefix',) + key
-            content.append('content for ' + letter + '\n')
+                key = (b'prefix',) + key
+            content.append(b'content for ' + letter + b'\n')
             source.add_lines(key, parents, content)
             keys.append(key)
             parents = (key,)
         source_transport.mkdir('.')
         source = make_file_factory(False, mapper)(source_transport)
         get_diamond_files(source, self.key_length, trailing_eol=True,
-            nograph=False, left_only=False)
+                          nograph=False, left_only=False)
         return source
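A self-contained sketch of how such a knit source is assembled, using the module paths as they stand on this branch:

    from breezy.bzr.knit import make_file_factory
    from breezy.bzr.versionedfile import ConstantMapper
    from breezy.transport.memory import MemoryTransport

    t = MemoryTransport()
    t.mkdir('.')
    # annotated=False gives a plain delta-compressed knit keyed by 1-tuples
    source = make_file_factory(False, ConstantMapper('source'))(t)
    source.add_lines((b'origin',), (), [b'origin\n'])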
     def test_insert_record_stream_delta_missing_basis_no_corruption(self):

         not added.
         """
         source = self.get_knit_delta_source()
-        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
+        keys = [self.get_simple_key(b'origin'), self.get_simple_key(b'merged')]
         entries = source.get_record_stream(keys, 'unordered', False)
         files = self.get_versionedfiles()
         if self.support_partial_insertion:
             self.assertEqual([],
-                list(files.get_missing_compression_parent_keys()))
+                             list(files.get_missing_compression_parent_keys()))
             files.insert_record_stream(entries)
             missing_bases = files.get_missing_compression_parent_keys()
-            self.assertEqual({self.get_simple_key('left')},
-                set(missing_bases))
+            self.assertEqual({self.get_simple_key(b'left')},
+                             set(missing_bases))
             self.assertEqual(set(keys), set(files.get_parent_map(keys)))
         else:
             self.assertRaises(
2541
            raise TestNotApplicable(
2564
2542
                'versioned file scenario does not support partial insertion')
2565
2543
        source = self.get_knit_delta_source()
2566
 
        entries = source.get_record_stream([self.get_simple_key(b'origin'),
2567
 
                                            self.get_simple_key(b'merged')], 'unordered', False)
 
2544
        entries = source.get_record_stream([self.get_simple_key('origin'),
 
2545
            self.get_simple_key('merged')], 'unordered', False)
2568
2546
        files = self.get_versionedfiles()
2569
2547
        files.insert_record_stream(entries)
2570
2548
        missing_bases = files.get_missing_compression_parent_keys()
2571
 
        self.assertEqual({self.get_simple_key(b'left')},
2572
 
                         set(missing_bases))
 
2549
        self.assertEqual({self.get_simple_key('left')},
 
2550
            set(missing_bases))
2573
2551
        # 'merged' is inserted (although a commit of a write group involving
2574
2552
        # this versionedfiles would fail).
2575
 
        merged_key = self.get_simple_key(b'merged')
 
2553
        merged_key = self.get_simple_key('merged')
2576
2554
        self.assertEqual(
2577
 
            [merged_key], list(files.get_parent_map([merged_key]).keys()))
 
2555
            [merged_key], files.get_parent_map([merged_key]).keys())
2578
2556
        # Add the full delta closure of the missing records
2579
2557
        missing_entries = source.get_record_stream(
2580
2558
            missing_bases, 'unordered', True)
2582
2560
        # Now 'merged' is fully inserted (and a commit would succeed).
2583
2561
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
2584
2562
        self.assertEqual(
2585
 
            [merged_key], list(files.get_parent_map([merged_key]).keys()))
 
2563
            [merged_key], files.get_parent_map([merged_key]).keys())
2586
2564
        files.check()
2587
2565
 
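The partial-insertion contract in sketch form (assuming files supports partial insertion and source still holds the full delta closure):

    files.insert_record_stream(entries)
    # the delta landed without its compression basis, so the basis key
    # is reported until its closure is supplied
    missing = files.get_missing_compression_parent_keys()  # e.g. {(b'left',)}
    files.insert_record_stream(
        source.get_record_stream(missing, 'unordered', True))
    assert list(files.get_missing_compression_parent_keys()) == []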
     def test_iter_lines_added_or_present_in_keys(self):

         files = self.get_versionedfiles()
         # add a base to get included
-        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
+        files.add_lines(self.get_simple_key(b'base'), (), [b'base\n'])
         # add a ancestor to be included on one side
-        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
+        files.add_lines(self.get_simple_key(
+            b'lancestor'), (), [b'lancestor\n'])
         # add a ancestor to be included on the other side
-        files.add_lines(self.get_simple_key('rancestor'),
-            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
+        files.add_lines(self.get_simple_key(b'rancestor'),
+                        self.get_parents([self.get_simple_key(b'base')]), [b'rancestor\n'])
         # add a child of rancestor with no eofile-nl
-        files.add_lines(self.get_simple_key('child'),
-            self.get_parents([self.get_simple_key('rancestor')]),
-            ['base\n', 'child\n'])
+        files.add_lines(self.get_simple_key(b'child'),
+                        self.get_parents([self.get_simple_key(b'rancestor')]),
+                        [b'base\n', b'child\n'])
         # add a child of lancestor and base to join the two roots
-        files.add_lines(self.get_simple_key('otherchild'),
-            self.get_parents([self.get_simple_key('lancestor'),
-                self.get_simple_key('base')]),
-            ['base\n', 'lancestor\n', 'otherchild\n'])
+        files.add_lines(self.get_simple_key(b'otherchild'),
+                        self.get_parents([self.get_simple_key(b'lancestor'),
+                                          self.get_simple_key(b'base')]),
+                        [b'base\n', b'lancestor\n', b'otherchild\n'])
         def iter_with_keys(keys, expected):
             # now we need to see what lines are returned, and how often.
             lines = {}
             progress = InstrumentedProgress()
             # iterate over the lines
             for line in files.iter_lines_added_or_present_in_keys(keys,
-                pb=progress):
+                                                                  pb=progress):
                 lines.setdefault(line, 0)
                 lines[line] += 1
-            if []!= progress.updates:
+            if [] != progress.updates:
                 self.assertEqual(expected, progress.updates)
             return lines
         lines = iter_with_keys(
-            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
+            [self.get_simple_key(b'child'),
+             self.get_simple_key(b'otherchild')],
             [('Walking content', 0, 2),
              ('Walking content', 1, 2),
              ('Walking content', 2, 2)])
         # we must see child and otherchild
-        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
+        self.assertTrue(lines[(b'child\n', self.get_simple_key(b'child'))] > 0)
         self.assertTrue(
-            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
+            lines[(b'otherchild\n', self.get_simple_key(b'otherchild'))] > 0)
         # we dont care if we got more than that.

         # test all lines
         lines = iter_with_keys(files.keys(),
-            [('Walking content', 0, 5),
-             ('Walking content', 1, 5),
-             ('Walking content', 2, 5),
-             ('Walking content', 3, 5),
-             ('Walking content', 4, 5),
-             ('Walking content', 5, 5)])
+                               [('Walking content', 0, 5),
+                                ('Walking content', 1, 5),
+                                ('Walking content', 2, 5),
+                                ('Walking content', 3, 5),
+                                ('Walking content', 4, 5),
+                                ('Walking content', 5, 5)])
         # all lines must be seen at least once
-        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
-        self.assertTrue(
-            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
-        self.assertTrue(
-            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
-        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
-        self.assertTrue(
-            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
+        self.assertTrue(lines[(b'base\n', self.get_simple_key(b'base'))] > 0)
+        self.assertTrue(
+            lines[(b'lancestor\n', self.get_simple_key(b'lancestor'))] > 0)
+        self.assertTrue(
+            lines[(b'rancestor\n', self.get_simple_key(b'rancestor'))] > 0)
+        self.assertTrue(lines[(b'child\n', self.get_simple_key(b'child'))] > 0)
+        self.assertTrue(
+            lines[(b'otherchild\n', self.get_simple_key(b'otherchild'))] > 0)
     def test_make_mpdiffs(self):
         from breezy import multiparent

         # add texts that should trip the knit maximum delta chain threshold
         # as well as doing parallel chains of data in knits.
         # this is done by two chains of 25 insertions
-        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
-        files.add_lines(self.get_simple_key('noeol'),
-            self.get_parents([self.get_simple_key('base')]), ['line'])
+        files.add_lines(self.get_simple_key(b'base'), [], [b'line\n'])
+        files.add_lines(self.get_simple_key(b'noeol'),
+                        self.get_parents([self.get_simple_key(b'base')]), [b'line'])
         # detailed eol tests:
         # shared last line with parent no-eol
-        files.add_lines(self.get_simple_key('noeolsecond'),
-            self.get_parents([self.get_simple_key('noeol')]),
-                ['line\n', 'line'])
+        files.add_lines(self.get_simple_key(b'noeolsecond'),
+                        self.get_parents([self.get_simple_key(b'noeol')]),
+                        [b'line\n', b'line'])
         # differing last line with parent, both no-eol
-        files.add_lines(self.get_simple_key('noeolnotshared'),
-            self.get_parents([self.get_simple_key('noeolsecond')]),
-                ['line\n', 'phone'])
+        files.add_lines(self.get_simple_key(b'noeolnotshared'),
+                        self.get_parents(
+                            [self.get_simple_key(b'noeolsecond')]),
+                        [b'line\n', b'phone'])
         # add eol following a noneol parent, change content
-        files.add_lines(self.get_simple_key('eol'),
-            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
+        files.add_lines(self.get_simple_key(b'eol'),
+                        self.get_parents([self.get_simple_key(b'noeol')]), [b'phone\n'])
         # add eol following a noneol parent, no change content
-        files.add_lines(self.get_simple_key('eolline'),
-            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
+        files.add_lines(self.get_simple_key(b'eolline'),
+                        self.get_parents([self.get_simple_key(b'noeol')]), [b'line\n'])
         # noeol with no parents:
-        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
+        files.add_lines(self.get_simple_key(b'noeolbase'), [], [b'line'])
         # noeol preceeding its leftmost parent in the output:
         # this is done by making it a merge of two parents with no common
         # anestry: noeolbase and noeol with the
         # later-inserted parent the leftmost.
-        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
-            self.get_parents([self.get_simple_key('noeolbase'),
-                self.get_simple_key('noeol')]),
-            ['line'])
+        files.add_lines(self.get_simple_key(b'eolbeforefirstparent'),
+                        self.get_parents([self.get_simple_key(b'noeolbase'),
+                                          self.get_simple_key(b'noeol')]),
+                        [b'line'])
         # two identical eol texts
-        files.add_lines(self.get_simple_key('noeoldup'),
-            self.get_parents([self.get_simple_key('noeol')]), ['line'])
-        next_parent = self.get_simple_key('base')
-        text_name = 'chain1-'
-        text = ['line\n']
-        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
-                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
-                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
-                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
-                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
-                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
-                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
-                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
-                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
-                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
-                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
-                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
-                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
-                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
-                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
-                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
-                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
-                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
-                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
-                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
-                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
-                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
-                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
-                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
-                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
-                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
+        files.add_lines(self.get_simple_key(b'noeoldup'),
+                        self.get_parents([self.get_simple_key(b'noeol')]), [b'line'])
+        next_parent = self.get_simple_key(b'base')
+        text_name = b'chain1-'
+        text = [b'line\n']
+        sha1s = {0: b'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
+                 1: b'45e21ea146a81ea44a821737acdb4f9791c8abe7',
+                 2: b'e1f11570edf3e2a070052366c582837a4fe4e9fa',
+                 3: b'26b4b8626da827088c514b8f9bbe4ebf181edda1',
+                 4: b'e28a5510be25ba84d31121cff00956f9970ae6f6',
+                 5: b'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
+                 6: b'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
+                 7: b'95c14da9cafbf828e3e74a6f016d87926ba234ab',
+                 8: b'779e9a0b28f9f832528d4b21e17e168c67697272',
+                 9: b'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
+                 10: b'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
+                 11: b'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
+                 12: b'31a2286267f24d8bedaa43355f8ad7129509ea85',
+                 13: b'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
+                 14: b'2c4b1736566b8ca6051e668de68650686a3922f2',
+                 15: b'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
+                 16: b'b0d2e18d3559a00580f6b49804c23fea500feab3',
+                 17: b'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
+                 18: b'5cf64a3459ae28efa60239e44b20312d25b253f3',
+                 19: b'1ebed371807ba5935958ad0884595126e8c4e823',
+                 20: b'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
+                 21: b'01edc447978004f6e4e962b417a4ae1955b6fe5d',
+                 22: b'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
+                 23: b'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
+                 24: b'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
+                 25: b'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                  }
         for depth in range(26):
-            new_version = self.get_simple_key(text_name + '%s' % depth)
-            text = text + ['line\n']
+            new_version = self.get_simple_key(text_name + b'%d' % depth)
+            text = text + [b'line\n']
             files.add_lines(new_version, self.get_parents([next_parent]), text)
             next_parent = new_version
-        next_parent = self.get_simple_key('base')
-        text_name = 'chain2-'
-        text = ['line\n']
+        next_parent = self.get_simple_key(b'base')
+        text_name = b'chain2-'
+        text = [b'line\n']
         for depth in range(26):
-            new_version = self.get_simple_key(text_name + '%s' % depth)
-            text = text + ['line\n']
+            new_version = self.get_simple_key(text_name + b'%d' % depth)
+            text = text + [b'line\n']
             files.add_lines(new_version, self.get_parents([next_parent]), text)
             next_parent = new_version
         target = self.get_versionedfiles('target')
             target.add_mpdiffs(
                 [(key, parents, files.get_sha1s([key])[key], mpdiff)])
             self.assertEqualDiff(
-                files.get_record_stream([key], 'unordered',
-                    True).next().get_bytes_as('fulltext'),
-                target.get_record_stream([key], 'unordered',
-                    True).next().get_bytes_as('fulltext')
+                next(files.get_record_stream([key], 'unordered',
+                                             True)).get_bytes_as('fulltext'),
+                next(target.get_record_stream([key], 'unordered',
+                                              True)).get_bytes_as('fulltext')
                 )
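The round trip asserted above, sketched end to end; pairing make_mpdiffs() output positionally with the input keys is an assumption based on its key-ordered return value:

    for key, mpdiff in zip(keys, files.make_mpdiffs(keys)):
        parents = files.get_parent_map([key])[key] or ()
        target.add_mpdiffs(
            [(key, parents, files.get_sha1s([key])[key], mpdiff)])
    # reapplying the diffs must reproduce byte-identical fulltexts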
     def test_keys(self):

         files = self.get_versionedfiles()
         self.assertEqual(set(), set(files.keys()))
         if self.key_length == 1:
-            key = ('foo',)
+            key = (b'foo',)
         else:
-            key = ('foo', 'bar',)
+            key = (b'foo', b'bar',)
         files.add_lines(key, (), [])
         self.assertEqual({key}, set(files.keys()))

     def test_add_lines(self):
         self.assertRaises(NotImplementedError,
-                self.texts.add_lines, "foo", [], [])
+                          self.texts.add_lines, b"foo", [], [])

     def test_add_mpdiffs(self):
         self.assertRaises(NotImplementedError,
-                self.texts.add_mpdiffs, [])
+                          self.texts.add_mpdiffs, [])

     def test_check_noerrors(self):
         self.texts.check()
                           [])

     def test_get_sha1s_nonexistent(self):
-        self.assertEqual({}, self.texts.get_sha1s([("NONEXISTENT",)]))
+        self.assertEqual({}, self.texts.get_sha1s([(b"NONEXISTENT",)]))

     def test_get_sha1s(self):
-        self._lines["key"] = ["dataline1", "dataline2"]
-        self.assertEqual({("key",): osutils.sha_strings(self._lines["key"])},
-                           self.texts.get_sha1s([("key",)]))
+        self._lines[b"key"] = [b"dataline1", b"dataline2"]
+        self.assertEqual({(b"key",): osutils.sha_strings(self._lines[b"key"])},
+                         self.texts.get_sha1s([(b"key",)]))
     def test_get_parent_map(self):
-        self._parent_map = {"G": ("A", "B")}
-        self.assertEqual({("G",): (("A",),("B",))},
-                          self.texts.get_parent_map([("G",), ("L",)]))
+        self._parent_map = {b"G": (b"A", b"B")}
+        self.assertEqual({(b"G",): ((b"A",), (b"B",))},
+                         self.texts.get_parent_map([(b"G",), (b"L",)]))
     def test_get_record_stream(self):
-        self._lines["A"] = ["FOO", "BAR"]
-        it = self.texts.get_record_stream([("A",)], "unordered", True)
+        self._lines[b"A"] = [b"FOO", b"BAR"]
+        it = self.texts.get_record_stream([(b"A",)], "unordered", True)
         record = next(it)
         self.assertEqual("chunked", record.storage_kind)
-        self.assertEqual("FOOBAR", record.get_bytes_as("fulltext"))
-        self.assertEqual(["FOO", "BAR"], record.get_bytes_as("chunked"))
+        self.assertEqual(b"FOOBAR", record.get_bytes_as("fulltext"))
+        self.assertEqual([b"FOO", b"BAR"], record.get_bytes_as("chunked"))
     def test_get_record_stream_absent(self):
-        it = self.texts.get_record_stream([("A",)], "unordered", True)
+        it = self.texts.get_record_stream([(b"A",)], "unordered", True)
         record = next(it)
         self.assertEqual("absent", record.storage_kind)
     def test_iter_lines_added_or_present_in_keys(self):
-        self._lines["A"] = ["FOO", "BAR"]
-        self._lines["B"] = ["HEY"]
-        self._lines["C"] = ["Alberta"]
-        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
-        self.assertEqual(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
-            sorted(list(it)))
+        self._lines[b"A"] = [b"FOO", b"BAR"]
+        self._lines[b"B"] = [b"HEY"]
+        self._lines[b"C"] = [b"Alberta"]
+        it = self.texts.iter_lines_added_or_present_in_keys([(b"A",), (b"B",)])
+        self.assertEqual(sorted([(b"FOO", b"A"), (b"BAR", b"A"), (b"HEY", b"B")]),
+                         sorted(list(it)))


 class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):
     def get_ordering_vf(self, key_priority):
         builder = self.make_branch_builder('test')
         builder.start_series()
-        builder.build_snapshot('A', None, [
-            ('add', ('', 'TREE_ROOT', 'directory', None))])
-        builder.build_snapshot('B', ['A'], [])
-        builder.build_snapshot('C', ['B'], [])
-        builder.build_snapshot('D', ['C'], [])
+        builder.build_snapshot(None, [
+            ('add', ('', b'TREE_ROOT', 'directory', None))],
+            revision_id=b'A')
+        builder.build_snapshot([b'A'], [], revision_id=b'B')
+        builder.build_snapshot([b'B'], [], revision_id=b'C')
+        builder.build_snapshot([b'C'], [], revision_id=b'D')
         builder.finish_series()
         b = builder.get_branch()
         b.lock_read()
         self.assertEqual([], vf.calls)

     def test_get_record_stream_topological(self):
-        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
-        request_keys = [('B',), ('C',), ('D',), ('A',)]
+        vf = self.get_ordering_vf(
+            {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1})
+        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
         keys = [r.key for r in vf.get_record_stream(request_keys,
-                                    'topological', False)]
+                                                    'topological', False)]
         # We should have gotten the keys in topological order
-        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
+        self.assertEqual([(b'A',), (b'B',), (b'C',), (b'D',)], keys)
         # And recorded that the request was made
         self.assertEqual([('get_record_stream', request_keys, 'topological',
                            False)], vf.calls)

     def test_get_record_stream_ordered(self):
-        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
-        request_keys = [('B',), ('C',), ('D',), ('A',)]
+        vf = self.get_ordering_vf(
+            {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1})
+        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
         keys = [r.key for r in vf.get_record_stream(request_keys,
-                                   'unordered', False)]
+                                                    'unordered', False)]
         # They should be returned based on their priority
-        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
+        self.assertEqual([(b'D',), (b'B',), (b'A',), (b'C',)], keys)
         # And the request recorded
         self.assertEqual([('get_record_stream', request_keys, 'unordered',
                            False)], vf.calls)

     def test_get_record_stream_implicit_order(self):
-        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
-        request_keys = [('B',), ('C',), ('D',), ('A',)]
+        vf = self.get_ordering_vf({(b'B',): 2, (b'D',): 1})
+        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
         keys = [r.key for r in vf.get_record_stream(request_keys,
-                                   'unordered', False)]
+                                                    'unordered', False)]
         # A and C are not in the map, so they get sorted to the front. A comes
         # before C alphabetically, so it comes back first
-        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
+        self.assertEqual([(b'A',), (b'C',), (b'D',), (b'B',)], keys)
         # And the request recorded
         self.assertEqual([('get_record_stream', request_keys, 'unordered',
                            False)], vf.calls)
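Taken together, these three tests fix the decorator's ordering contract: 'topological' must honour the parent graph regardless of priorities, while 'unordered' is free to follow the priority map, with keys missing from the map sorted to the front. In sketch form:

    vf = self.get_ordering_vf({(b'B',): 2, (b'D',): 1})
    keys = [r.key for r in vf.get_record_stream(
        [(b'B',), (b'C',), (b'D',), (b'A',)], 'unordered', False)]
    # unknown keys (A, C) come first, then ascending priority: D before B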