/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

Viewing changes to breezy/tests/per_versionedfile.py

  • Committer: Jelmer Vernooij
  • Date: 2020-03-22 19:12:43 UTC
  • mfrom: (7490.7.6 work)
  • mto: (7490.7.7 work)
  • mto: This revision was merged to the branch mainline in revision 7501.
  • Revision ID: jelmer@jelmer.uk-20200322191243-yx8ils8lvfmfh7rq
Merge lp:brz/3.1.
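
Note: the bulk of this diff is Python 3 porting work. Imports move from the
old bzrlib namespace to relative breezy imports, version ids and text lines
become bytes rather than native str, and the deprecated assertEquals spelling
becomes assertEqual. A minimal sketch of the recurring pattern (names taken
from the hunks below):

    # Python 2 era bzrlib API: native str version ids and lines
    f.add_lines('r0', [], ['a\n', 'b\n'])
    self.assertEquals(f.get_text('r0'), 'a\nb\n')

    # breezy on Python 3: bytes throughout, plus unittest's assertEqual
    f.add_lines(b'r0', [], [b'a\n', b'b\n'])
    self.assertEqual(f.get_text(b'r0'), b'a\nb\n')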

--- breezy/tests/per_versionedfile.py	(old)
+++ breezy/tests/per_versionedfile.py	(new)
@@ -1,4 +1,4 @@
-# Copyright (C) 2006-2010 Canonical Ltd
+# Copyright (C) 2006-2012, 2016 Canonical Ltd
 #
 # Authors:
 #   Johan Rydberg <jrydberg@gnu.org>
@@ -21,149 +21,60 @@
 # TODO: might be nice to create a versionedfile with some type of corruption
 # considered typical and check that it can be detected/corrected.
 
-from itertools import chain, izip
-from StringIO import StringIO
+from gzip import GzipFile
+import itertools
 
-from bzrlib import (
+from .. import (
     errors,
     graph as _mod_graph,
-    groupcompress,
-    knit as _mod_knit,
     osutils,
     progress,
+    transport,
     ui,
     )
-from bzrlib.errors import (
-                           RevisionNotPresent,
-                           RevisionAlreadyPresent,
-                           WeaveParentMismatch
-                           )
-from bzrlib.knit import (
+from ..bzr import (
+    groupcompress,
+    knit as _mod_knit,
+    )
+from ..errors import (
+    RevisionNotPresent,
+    RevisionAlreadyPresent,
+    )
+from ..bzr.knit import (
     cleanup_pack_knit,
     make_file_factory,
     make_pack_factory,
-    KnitAnnotateFactory,
-    KnitPlainFactory,
-    )
-from bzrlib.tests import (
+    )
+from ..sixish import (
+    BytesIO,
+    zip,
+    )
+from . import (
     TestCase,
     TestCaseWithMemoryTransport,
     TestNotApplicable,
     TestSkipped,
-    condition_isinstance,
-    split_suite_by_condition,
-    multiply_tests,
     )
-from bzrlib.tests.http_utils import TestCaseWithWebserver
-from bzrlib.trace import mutter
-from bzrlib.transport import get_transport
-from bzrlib.transport.memory import MemoryTransport
-from bzrlib.tsort import topo_sort
-from bzrlib.tuned_gzip import GzipFile
-import bzrlib.versionedfile as versionedfile
-from bzrlib.versionedfile import (
+from .http_utils import TestCaseWithWebserver
+from ..transport.memory import MemoryTransport
+from ..bzr import versionedfile as versionedfile
+from ..bzr.versionedfile import (
+    ChunkedContentFactory,
     ConstantMapper,
     HashEscapedPrefixMapper,
     PrefixMapper,
     VirtualVersionedFiles,
     make_versioned_files_factory,
     )
-from bzrlib.weave import WeaveFile
-from bzrlib.weavefile import read_weave, write_weave
-
-
-def load_tests(standard_tests, module, loader):
-    """Parameterize VersionedFiles tests for different implementations."""
-    to_adapt, result = split_suite_by_condition(
-        standard_tests, condition_isinstance(TestVersionedFiles))
-    # We want to be sure of behaviour for:
-    # weaves prefix layout (weave texts)
-    # individually named weaves (weave inventories)
-    # annotated knits - prefix|hash|hash-escape layout, we test the third only
-    #                   as it is the most complex mapper.
-    # individually named knits
-    # individual no-graph knits in packs (signatures)
-    # individual graph knits in packs (inventories)
-    # individual graph nocompression knits in packs (revisions)
-    # plain text knits in packs (texts)
-    len_one_scenarios = [
-        ('weave-named', {
-            'cleanup':None,
-            'factory':make_versioned_files_factory(WeaveFile,
-                ConstantMapper('inventory')),
-            'graph':True,
-            'key_length':1,
-            'support_partial_insertion': False,
-            }),
-        ('named-knit', {
-            'cleanup':None,
-            'factory':make_file_factory(False, ConstantMapper('revisions')),
-            'graph':True,
-            'key_length':1,
-            'support_partial_insertion': False,
-            }),
-        ('named-nograph-nodelta-knit-pack', {
-            'cleanup':cleanup_pack_knit,
-            'factory':make_pack_factory(False, False, 1),
-            'graph':False,
-            'key_length':1,
-            'support_partial_insertion': False,
-            }),
-        ('named-graph-knit-pack', {
-            'cleanup':cleanup_pack_knit,
-            'factory':make_pack_factory(True, True, 1),
-            'graph':True,
-            'key_length':1,
-            'support_partial_insertion': True,
-            }),
-        ('named-graph-nodelta-knit-pack', {
-            'cleanup':cleanup_pack_knit,
-            'factory':make_pack_factory(True, False, 1),
-            'graph':True,
-            'key_length':1,
-            'support_partial_insertion': False,
-            }),
-        ('groupcompress-nograph', {
-            'cleanup':groupcompress.cleanup_pack_group,
-            'factory':groupcompress.make_pack_factory(False, False, 1),
-            'graph': False,
-            'key_length':1,
-            'support_partial_insertion':False,
-            }),
-        ]
-    len_two_scenarios = [
-        ('weave-prefix', {
-            'cleanup':None,
-            'factory':make_versioned_files_factory(WeaveFile,
-                PrefixMapper()),
-            'graph':True,
-            'key_length':2,
-            'support_partial_insertion': False,
-            }),
-        ('annotated-knit-escape', {
-            'cleanup':None,
-            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
-            'graph':True,
-            'key_length':2,
-            'support_partial_insertion': False,
-            }),
-        ('plain-knit-pack', {
-            'cleanup':cleanup_pack_knit,
-            'factory':make_pack_factory(True, True, 2),
-            'graph':True,
-            'key_length':2,
-            'support_partial_insertion': True,
-            }),
-        ('groupcompress', {
-            'cleanup':groupcompress.cleanup_pack_group,
-            'factory':groupcompress.make_pack_factory(True, False, 1),
-            'graph': True,
-            'key_length':1,
-            'support_partial_insertion':False,
-            }),
-        ]
-    scenarios = len_one_scenarios + len_two_scenarios
-    return multiply_tests(to_adapt, scenarios, result)
+from ..bzr.weave import (
+    WeaveFile,
+    WeaveInvalidChecksum,
+    )
+from ..bzr.weavefile import write_weave
+from .scenarios import load_tests_apply_scenarios
+
+
+load_tests = load_tests_apply_scenarios
 
 
 def get_diamond_vf(f, trailing_eol=True, left_only=False):
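
Note: besides the bzrlib-to-breezy renames, this import hunk does two things:
compatibility helpers now come from breezy's internal sixish module (BytesIO,
zip), and the hand-written load_tests multiplier is replaced by
load_tests_apply_scenarios. A sketch of the scenario style, assuming the usual
breezy.tests.scenarios behaviour of reading a scenarios attribute from the
test class (TestVersionedFiles gains that attribute outside this hunk):

    from breezy.tests import TestCase
    from breezy.tests.scenarios import load_tests_apply_scenarios

    load_tests = load_tests_apply_scenarios

    class TestExample(TestCase):
        # each (name, dict) pair becomes one parameterized copy of the tests
        scenarios = [('len-one', {'key_length': 1}),
                     ('len-two', {'key_length': 2})]

        def test_key_length(self):
            self.assertIn(self.key_length, (1, 2))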
@@ -172,30 +83,30 @@
     :param trailing_eol: If True end the last line with \n.
     """
     parents = {
-        'origin': (),
-        'base': (('origin',),),
-        'left': (('base',),),
-        'right': (('base',),),
-        'merged': (('left',), ('right',)),
+        b'origin': (),
+        b'base': ((b'origin',),),
+        b'left': ((b'base',),),
+        b'right': ((b'base',),),
+        b'merged': ((b'left',), (b'right',)),
         }
     # insert a diamond graph to exercise deltas and merges.
     if trailing_eol:
-        last_char = '\n'
+        last_char = b'\n'
     else:
-        last_char = ''
-    f.add_lines('origin', [], ['origin' + last_char])
-    f.add_lines('base', ['origin'], ['base' + last_char])
-    f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
+        last_char = b''
+    f.add_lines(b'origin', [], [b'origin' + last_char])
+    f.add_lines(b'base', [b'origin'], [b'base' + last_char])
+    f.add_lines(b'left', [b'base'], [b'base\n', b'left' + last_char])
     if not left_only:
-        f.add_lines('right', ['base'],
-            ['base\n', 'right' + last_char])
-        f.add_lines('merged', ['left', 'right'],
-            ['base\n', 'left\n', 'right\n', 'merged' + last_char])
+        f.add_lines(b'right', [b'base'],
+                    [b'base\n', b'right' + last_char])
+        f.add_lines(b'merged', [b'left', b'right'],
+                    [b'base\n', b'left\n', b'right\n', b'merged' + last_char])
     return f, parents
 
 
 def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
-    nograph=False, nokeys=False):
+                      nograph=False, nokeys=False):
     """Get a diamond graph to exercise deltas and merges.
 
     This creates a 5-node graph in files. If files supports 2-length keys two
@@ -217,19 +128,21 @@
     if key_length == 1:
         prefixes = [()]
     else:
-        prefixes = [('FileA',), ('FileB',)]
+        prefixes = [(b'FileA',), (b'FileB',)]
     # insert a diamond graph to exercise deltas and merges.
     if trailing_eol:
-        last_char = '\n'
+        last_char = b'\n'
     else:
-        last_char = ''
+        last_char = b''
     result = []
+
     def get_parents(suffix_list):
         if nograph:
             return ()
         else:
             result = [prefix + suffix for suffix in suffix_list]
             return result
+
     def get_key(suffix):
         if nokeys:
             return (None, )
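
Note: with key_length == 2 each logical file gets a one-element prefix tuple,
and full keys are built by concatenation, which is what prefix + get_key(...)
in the next hunk does. The key shapes involved (values from this hunk):

    prefix = (b'FileA',)
    prefix + (b'origin',)    # -> (b'FileA', b'origin')
    # with key_length == 1 the prefix is the empty tuple:
    () + (b'origin',)        # -> (b'origin',)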
@@ -238,24 +151,25 @@
     # we loop over each key because that spreads the inserts across prefixes,
     # which is how commit operates.
     for prefix in prefixes:
-        result.append(files.add_lines(prefix + get_key('origin'), (),
-            ['origin' + last_char]))
-    for prefix in prefixes:
-        result.append(files.add_lines(prefix + get_key('base'),
-            get_parents([('origin',)]), ['base' + last_char]))
-    for prefix in prefixes:
-        result.append(files.add_lines(prefix + get_key('left'),
-            get_parents([('base',)]),
-            ['base\n', 'left' + last_char]))
+        result.append(files.add_lines(prefix + get_key(b'origin'), (),
+                                      [b'origin' + last_char]))
+    for prefix in prefixes:
+        result.append(files.add_lines(prefix + get_key(b'base'),
+                                      get_parents([(b'origin',)]), [b'base' + last_char]))
+    for prefix in prefixes:
+        result.append(files.add_lines(prefix + get_key(b'left'),
+                                      get_parents([(b'base',)]),
+                                      [b'base\n', b'left' + last_char]))
     if not left_only:
         for prefix in prefixes:
-            result.append(files.add_lines(prefix + get_key('right'),
-                get_parents([('base',)]),
-                ['base\n', 'right' + last_char]))
+            result.append(files.add_lines(prefix + get_key(b'right'),
+                                          get_parents([(b'base',)]),
+                                          [b'base\n', b'right' + last_char]))
         for prefix in prefixes:
-            result.append(files.add_lines(prefix + get_key('merged'),
-                get_parents([('left',), ('right',)]),
-                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
+            result.append(files.add_lines(prefix + get_key(b'merged'),
+                                          get_parents(
+                                              [(b'left',), (b'right',)]),
+                                          [b'base\n', b'left\n', b'right\n', b'merged' + last_char]))
     return result
 
 
@@ -274,22 +188,23 @@
 
     def test_add(self):
         f = self.get_file()
-        f.add_lines('r0', [], ['a\n', 'b\n'])
-        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
+        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
+        f.add_lines(b'r1', [b'r0'], [b'b\n', b'c\n'])
+
         def verify_file(f):
             versions = f.versions()
-            self.assertTrue('r0' in versions)
-            self.assertTrue('r1' in versions)
-            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
-            self.assertEquals(f.get_text('r0'), 'a\nb\n')
-            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
+            self.assertTrue(b'r0' in versions)
+            self.assertTrue(b'r1' in versions)
+            self.assertEqual(f.get_lines(b'r0'), [b'a\n', b'b\n'])
+            self.assertEqual(f.get_text(b'r0'), b'a\nb\n')
+            self.assertEqual(f.get_lines(b'r1'), [b'b\n', b'c\n'])
             self.assertEqual(2, len(f))
             self.assertEqual(2, f.num_versions())
 
             self.assertRaises(RevisionNotPresent,
-                f.add_lines, 'r2', ['foo'], [])
+                              f.add_lines, b'r2', [b'foo'], [])
             self.assertRaises(RevisionAlreadyPresent,
-                f.add_lines, 'r1', [], [])
+                              f.add_lines, b'r1', [], [])
         verify_file(f)
         # this checks that reopen with create=True does not break anything.
         f = self.reopen_file(create=True)
@@ -298,32 +213,34 @@
     def test_adds_with_parent_texts(self):
         f = self.get_file()
         parent_texts = {}
-        _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
+        _, _, parent_texts[b'r0'] = f.add_lines(b'r0', [], [b'a\n', b'b\n'])
         try:
-            _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
-                ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
+            _, _, parent_texts[b'r1'] = f.add_lines_with_ghosts(b'r1',
+                                                                [b'r0', b'ghost'], [b'b\n', b'c\n'], parent_texts=parent_texts)
         except NotImplementedError:
             # if the format doesn't support ghosts, just add normally.
-            _, _, parent_texts['r1'] = f.add_lines('r1',
-                ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
-        f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
-        self.assertNotEqual(None, parent_texts['r0'])
-        self.assertNotEqual(None, parent_texts['r1'])
+            _, _, parent_texts[b'r1'] = f.add_lines(b'r1',
+                                                    [b'r0'], [b'b\n', b'c\n'], parent_texts=parent_texts)
+        f.add_lines(b'r2', [b'r1'], [b'c\n', b'd\n'],
+                    parent_texts=parent_texts)
+        self.assertNotEqual(None, parent_texts[b'r0'])
+        self.assertNotEqual(None, parent_texts[b'r1'])
+
         def verify_file(f):
             versions = f.versions()
-            self.assertTrue('r0' in versions)
-            self.assertTrue('r1' in versions)
-            self.assertTrue('r2' in versions)
-            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
-            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
-            self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
+            self.assertTrue(b'r0' in versions)
+            self.assertTrue(b'r1' in versions)
+            self.assertTrue(b'r2' in versions)
+            self.assertEqual(f.get_lines(b'r0'), [b'a\n', b'b\n'])
+            self.assertEqual(f.get_lines(b'r1'), [b'b\n', b'c\n'])
+            self.assertEqual(f.get_lines(b'r2'), [b'c\n', b'd\n'])
             self.assertEqual(3, f.num_versions())
-            origins = f.annotate('r1')
-            self.assertEquals(origins[0][0], 'r0')
-            self.assertEquals(origins[1][0], 'r1')
-            origins = f.annotate('r2')
-            self.assertEquals(origins[0][0], 'r1')
-            self.assertEquals(origins[1][0], 'r2')
+            origins = f.annotate(b'r1')
+            self.assertEqual(origins[0][0], b'r0')
+            self.assertEqual(origins[1][0], b'r1')
+            origins = f.annotate(b'r2')
+            self.assertEqual(origins[0][0], b'r1')
+            self.assertEqual(origins[1][0], b'r2')
 
         verify_file(f)
         f = self.reopen_file()
@@ -334,10 +251,10 @@
         # versioned files version sequences of bytes only.
         vf = self.get_file()
         self.assertRaises(errors.BzrBadParameterUnicode,
-            vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
+                          vf.add_lines, b'a', [], [b'a\n', u'b\n', b'c\n'])
         self.assertRaises(
             (errors.BzrBadParameterUnicode, NotImplementedError),
-            vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])
+            vf.add_lines_with_ghosts, b'a', [], [b'a\n', u'b\n', b'c\n'])
 
     def test_add_follows_left_matching_blocks(self):
         """If we change left_matching_blocks, delta changes
@@ -348,60 +265,60 @@
         vf = self.get_file()
         if isinstance(vf, WeaveFile):
             raise TestSkipped("WeaveFile ignores left_matching_blocks")
-        vf.add_lines('1', [], ['a\n'])
-        vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
+        vf.add_lines(b'1', [], [b'a\n'])
+        vf.add_lines(b'2', [b'1'], [b'a\n', b'a\n', b'a\n'],
                      left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
-        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
-        vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
+        self.assertEqual([b'a\n', b'a\n', b'a\n'], vf.get_lines(b'2'))
+        vf.add_lines(b'3', [b'1'], [b'a\n', b'a\n', b'a\n'],
                      left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
-        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))
+        self.assertEqual([b'a\n', b'a\n', b'a\n'], vf.get_lines(b'3'))
 
     def test_inline_newline_throws(self):
         # \r characters are not permitted in lines being added
         vf = self.get_file()
         self.assertRaises(errors.BzrBadParameterContainsNewline,
-            vf.add_lines, 'a', [], ['a\n\n'])
+                          vf.add_lines, b'a', [], [b'a\n\n'])
         self.assertRaises(
             (errors.BzrBadParameterContainsNewline, NotImplementedError),
-            vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
+            vf.add_lines_with_ghosts, b'a', [], [b'a\n\n'])
         # but inline CR's are allowed
-        vf.add_lines('a', [], ['a\r\n'])
+        vf.add_lines(b'a', [], [b'a\r\n'])
         try:
-            vf.add_lines_with_ghosts('b', [], ['a\r\n'])
+            vf.add_lines_with_ghosts(b'b', [], [b'a\r\n'])
         except NotImplementedError:
             pass
 
     def test_add_reserved(self):
         vf = self.get_file()
         self.assertRaises(errors.ReservedId,
-            vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])
+                          vf.add_lines, b'a:', [], [b'a\n', b'b\n', b'c\n'])
 
     def test_add_lines_nostoresha(self):
         """When nostore_sha is supplied using old content raises."""
         vf = self.get_file()
-        empty_text = ('a', [])
-        sample_text_nl = ('b', ["foo\n", "bar\n"])
-        sample_text_no_nl = ('c', ["foo\n", "bar"])
+        empty_text = (b'a', [])
+        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
+        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
         shas = []
         for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
             sha, _, _ = vf.add_lines(version, [], lines)
             shas.append(sha)
         # we now have a copy of all the lines in the vf.
         for sha, (version, lines) in zip(
-            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
+                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
             self.assertRaises(errors.ExistingContent,
-                vf.add_lines, version + "2", [], lines,
-                nostore_sha=sha)
+                              vf.add_lines, version + b"2", [], lines,
+                              nostore_sha=sha)
             # and no new version should have been added.
             self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
-                version + "2")
+                              version + b"2")
 
     def test_add_lines_with_ghosts_nostoresha(self):
         """When nostore_sha is supplied using old content raises."""
         vf = self.get_file()
-        empty_text = ('a', [])
-        sample_text_nl = ('b', ["foo\n", "bar\n"])
-        sample_text_no_nl = ('c', ["foo\n", "bar"])
+        empty_text = (b'a', [])
+        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
+        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
         shas = []
         for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
             sha, _, _ = vf.add_lines(version, [], lines)
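
Note: the left_matching_blocks argument exercised above is a delta hint,
triples of (position in the left parent, position in the new text, length in
lines) ending with a zero-length sentinel, the same shape that
difflib.SequenceMatcher produces. A sketch showing where the
[(0, 0, 1), (1, 3, 0)] value in this hunk comes from (the SequenceMatcher
correspondence is an illustration, not part of this diff):

    import difflib
    parent = [b'a\n']
    new = [b'a\n', b'a\n', b'a\n']
    blocks = difflib.SequenceMatcher(None, parent, new).get_matching_blocks()
    print([tuple(b) for b in blocks])   # [(0, 0, 1), (1, 3, 0)]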
@@ -409,24 +326,24 @@
         # we now have a copy of all the lines in the vf.
         # is the test applicable to this vf implementation?
         try:
-            vf.add_lines_with_ghosts('d', [], [])
+            vf.add_lines_with_ghosts(b'd', [], [])
         except NotImplementedError:
             raise TestSkipped("add_lines_with_ghosts is optional")
         for sha, (version, lines) in zip(
-            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
+                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
             self.assertRaises(errors.ExistingContent,
-                vf.add_lines_with_ghosts, version + "2", [], lines,
-                nostore_sha=sha)
+                              vf.add_lines_with_ghosts, version + b"2", [], lines,
+                              nostore_sha=sha)
             # and no new version should have been added.
             self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
-                version + "2")
+                              version + b"2")
 
     def test_add_lines_return_value(self):
         # add_lines should return the sha1 and the text size.
         vf = self.get_file()
-        empty_text = ('a', [])
-        sample_text_nl = ('b', ["foo\n", "bar\n"])
-        sample_text_no_nl = ('c', ["foo\n", "bar"])
+        empty_text = (b'a', [])
+        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
+        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
         # check results for the three cases:
         for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
             # the first two elements are the same for all versioned files:
@@ -435,17 +352,17 @@
             result = vf.add_lines(version, [], lines)
             self.assertEqual(3, len(result))
             self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
-                result[0:2])
+                             result[0:2])
         # parents should not affect the result:
         lines = sample_text_nl[1]
         self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
-            vf.add_lines('d', ['b', 'c'], lines)[0:2])
+                         vf.add_lines(b'd', [b'b', b'c'], lines)[0:2])
 
     def test_get_reserved(self):
         vf = self.get_file()
-        self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
-        self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
-        self.assertRaises(errors.ReservedId, vf.get_text, 'b:')
+        self.assertRaises(errors.ReservedId, vf.get_texts, [b'b:'])
+        self.assertRaises(errors.ReservedId, vf.get_lines, b'b:')
+        self.assertRaises(errors.ReservedId, vf.get_text, b'b:')
 
     def test_add_unchanged_last_line_noeol_snapshot(self):
         """Add a text with an unchanged last line with no eol should work."""
@@ -460,28 +377,28 @@
         for length in range(20):
             version_lines = {}
             vf = self.get_file('case-%d' % length)
-            prefix = 'step-%d'
+            prefix = b'step-%d'
            parents = []
             for step in range(length):
                 version = prefix % step
-                lines = (['prelude \n'] * step) + ['line']
+                lines = ([b'prelude \n'] * step) + [b'line']
                 vf.add_lines(version, parents, lines)
                 version_lines[version] = lines
                 parents = [version]
-            vf.add_lines('no-eol', parents, ['line'])
+            vf.add_lines(b'no-eol', parents, [b'line'])
             vf.get_texts(version_lines.keys())
-            self.assertEqualDiff('line', vf.get_text('no-eol'))
+            self.assertEqualDiff(b'line', vf.get_text(b'no-eol'))
 
     def test_get_texts_eol_variation(self):
         # similar to the failure in <http://bugs.launchpad.net/234748>
         vf = self.get_file()
-        sample_text_nl = ["line\n"]
-        sample_text_no_nl = ["line"]
+        sample_text_nl = [b"line\n"]
+        sample_text_no_nl = [b"line"]
         versions = []
         version_lines = {}
         parents = []
         for i in range(4):
-            version = 'v%d' % i
+            version = b'v%d' % i
             if i % 2:
                 lines = sample_text_nl
             else:
@@ -493,7 +410,7 @@
             # (which is what this test tests) will generate a correct line
             # delta (which is to say, an empty delta).
             vf.add_lines(version, parents, lines,
-                left_matching_blocks=[(0, 0, 1)])
+                         left_matching_blocks=[(0, 0, 1)])
             parents = [version]
             versions.append(version)
             version_lines[version] = lines
@@ -503,7 +420,7 @@
 
     def test_add_lines_with_matching_blocks_noeol_last_line(self):
         """Add a text with an unchanged last line with no eol should work."""
-        from bzrlib import multiparent
+        from breezy import multiparent
         # Hand verified sha1 of the text we're adding.
         sha1 = '6a1d115ec7b60afb664dc14890b5af5ce3c827a4'
         # Create a mpdiff which adds a new line before the trailing line, and
@@ -511,20 +428,20 @@
         # Test adding this in two situations:
         # On top of a new insertion
         vf = self.get_file('fulltext')
-        vf.add_lines('noeol', [], ['line'])
-        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
-            left_matching_blocks=[(0, 1, 1)])
-        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
+        vf.add_lines(b'noeol', [], [b'line'])
+        vf.add_lines(b'noeol2', [b'noeol'], [b'newline\n', b'line'],
+                     left_matching_blocks=[(0, 1, 1)])
+        self.assertEqualDiff(b'newline\nline', vf.get_text(b'noeol2'))
         # On top of a delta
         vf = self.get_file('delta')
-        vf.add_lines('base', [], ['line'])
-        vf.add_lines('noeol', ['base'], ['prelude\n', 'line'])
-        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
-            left_matching_blocks=[(1, 1, 1)])
-        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
+        vf.add_lines(b'base', [], [b'line'])
+        vf.add_lines(b'noeol', [b'base'], [b'prelude\n', b'line'])
+        vf.add_lines(b'noeol2', [b'noeol'], [b'newline\n', b'line'],
+                     left_matching_blocks=[(1, 1, 1)])
+        self.assertEqualDiff(b'newline\nline', vf.get_text(b'noeol2'))
 
     def test_make_mpdiffs(self):
-        from bzrlib import multiparent
+        from breezy import multiparent
         vf = self.get_file('foo')
         sha1s = self._setup_for_deltas(vf)
         new_vf = self.get_file('bar')
@@ -538,78 +455,80 @@
     def test_make_mpdiffs_with_ghosts(self):
         vf = self.get_file('foo')
         try:
-            vf.add_lines_with_ghosts('text', ['ghost'], ['line\n'])
+            vf.add_lines_with_ghosts(b'text', [b'ghost'], [b'line\n'])
         except NotImplementedError:
             # old Weave formats do not allow ghosts
             return
-        self.assertRaises(errors.RevisionNotPresent, vf.make_mpdiffs, ['ghost'])
+        self.assertRaises(errors.RevisionNotPresent,
+                          vf.make_mpdiffs, [b'ghost'])
 
     def _setup_for_deltas(self, f):
         self.assertFalse(f.has_version('base'))
         # add texts that should trip the knit maximum delta chain threshold
         # as well as doing parallel chains of data in knits.
         # this is done by two chains of 25 insertions
-        f.add_lines('base', [], ['line\n'])
-        f.add_lines('noeol', ['base'], ['line'])
+        f.add_lines(b'base', [], [b'line\n'])
+        f.add_lines(b'noeol', [b'base'], [b'line'])
         # detailed eol tests:
         # shared last line with parent no-eol
-        f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
+        f.add_lines(b'noeolsecond', [b'noeol'], [b'line\n', b'line'])
         # differing last line with parent, both no-eol
-        f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
+        f.add_lines(b'noeolnotshared', [b'noeolsecond'], [b'line\n', b'phone'])
         # add eol following a noneol parent, change content
-        f.add_lines('eol', ['noeol'], ['phone\n'])
+        f.add_lines(b'eol', [b'noeol'], [b'phone\n'])
         # add eol following a noneol parent, no change content
-        f.add_lines('eolline', ['noeol'], ['line\n'])
+        f.add_lines(b'eolline', [b'noeol'], [b'line\n'])
         # noeol with no parents:
-        f.add_lines('noeolbase', [], ['line'])
+        f.add_lines(b'noeolbase', [], [b'line'])
         # noeol preceeding its leftmost parent in the output:
         # this is done by making it a merge of two parents with no common
         # anestry: noeolbase and noeol with the
         # later-inserted parent the leftmost.
-        f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
+        f.add_lines(b'eolbeforefirstparent', [
+                    b'noeolbase', b'noeol'], [b'line'])
         # two identical eol texts
-        f.add_lines('noeoldup', ['noeol'], ['line'])
-        next_parent = 'base'
-        text_name = 'chain1-'
-        text = ['line\n']
-        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
-                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
-                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
-                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
-                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
-                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
-                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
-                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
-                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
-                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
-                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
-                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
-                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
-                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
-                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
-                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
-                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
-                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
-                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
-                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
-                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
-                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
-                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
-                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
-                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
-                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
+        f.add_lines(b'noeoldup', [b'noeol'], [b'line'])
+        next_parent = b'base'
+        text_name = b'chain1-'
+        text = [b'line\n']
+        sha1s = {0: b'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
+                 1: b'45e21ea146a81ea44a821737acdb4f9791c8abe7',
+                 2: b'e1f11570edf3e2a070052366c582837a4fe4e9fa',
+                 3: b'26b4b8626da827088c514b8f9bbe4ebf181edda1',
+                 4: b'e28a5510be25ba84d31121cff00956f9970ae6f6',
+                 5: b'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
+                 6: b'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
+                 7: b'95c14da9cafbf828e3e74a6f016d87926ba234ab',
+                 8: b'779e9a0b28f9f832528d4b21e17e168c67697272',
+                 9: b'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
+                 10: b'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
+                 11: b'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
+                 12: b'31a2286267f24d8bedaa43355f8ad7129509ea85',
+                 13: b'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
+                 14: b'2c4b1736566b8ca6051e668de68650686a3922f2',
+                 15: b'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
+                 16: b'b0d2e18d3559a00580f6b49804c23fea500feab3',
+                 17: b'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
+                 18: b'5cf64a3459ae28efa60239e44b20312d25b253f3',
+                 19: b'1ebed371807ba5935958ad0884595126e8c4e823',
+                 20: b'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
+                 21: b'01edc447978004f6e4e962b417a4ae1955b6fe5d',
+                 22: b'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
+                 23: b'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
+                 24: b'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
+                 25: b'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                  }
         for depth in range(26):
-            new_version = text_name + '%s' % depth
-            text = text + ['line\n']
+            new_version = text_name + b'%d' % depth
+            text = text + [b'line\n']
             f.add_lines(new_version, [next_parent], text)
             next_parent = new_version
-        next_parent = 'base'
-        text_name = 'chain2-'
-        text = ['line\n']
+        next_parent = b'base'
+        text_name = b'chain2-'
+        text = [b'line\n']
         for depth in range(26):
-            new_version = text_name + '%s' % depth
-            text = text + ['line\n']
+            new_version = text_name + b'%d' % depth
+            text = text + [b'line\n']
             f.add_lines(new_version, [next_parent], text)
             next_parent = new_version
         return sha1s
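
Note: the '%s' % depth to b'%d' % depth change in the chain loops above relies
on %-formatting for bytes, which only exists on Python 3.5 and newer (PEP 461);
%d is used because %s on bytes expects a bytes-like argument, not an int. A
quick illustration:

    depth = 3
    b'chain1-' + b'%d' % depth    # -> b'chain1-3' on Python 3.5+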
@@ -617,44 +536,45 @@
     def test_ancestry(self):
         f = self.get_file()
         self.assertEqual([], f.get_ancestry([]))
-        f.add_lines('r0', [], ['a\n', 'b\n'])
-        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
-        f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
-        f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
-        f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
+        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
+        f.add_lines(b'r1', [b'r0'], [b'b\n', b'c\n'])
+        f.add_lines(b'r2', [b'r0'], [b'b\n', b'c\n'])
+        f.add_lines(b'r3', [b'r2'], [b'b\n', b'c\n'])
+        f.add_lines(b'rM', [b'r1', b'r2'], [b'b\n', b'c\n'])
         self.assertEqual([], f.get_ancestry([]))
-        versions = f.get_ancestry(['rM'])
+        versions = f.get_ancestry([b'rM'])
         # there are some possibilities:
         # r0 r1 r2 rM r3
         # r0 r1 r2 r3 rM
         # etc
         # so we check indexes
-        r0 = versions.index('r0')
-        r1 = versions.index('r1')
-        r2 = versions.index('r2')
-        self.assertFalse('r3' in versions)
-        rM = versions.index('rM')
+        r0 = versions.index(b'r0')
+        r1 = versions.index(b'r1')
+        r2 = versions.index(b'r2')
+        self.assertFalse(b'r3' in versions)
+        rM = versions.index(b'rM')
         self.assertTrue(r0 < r1)
         self.assertTrue(r0 < r2)
         self.assertTrue(r1 < rM)
         self.assertTrue(r2 < rM)
 
         self.assertRaises(RevisionNotPresent,
-            f.get_ancestry, ['rM', 'rX'])
+                          f.get_ancestry, [b'rM', b'rX'])
 
-        self.assertEqual(set(f.get_ancestry('rM')),
-            set(f.get_ancestry('rM', topo_sorted=False)))
+        self.assertEqual(set(f.get_ancestry(b'rM')),
+                         set(f.get_ancestry(b'rM', topo_sorted=False)))
 
     def test_mutate_after_finish(self):
         self._transaction = 'before'
         f = self.get_file()
         self._transaction = 'after'
-        self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
-        self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])
+        self.assertRaises(errors.OutSideTransaction, f.add_lines, b'', [], [])
+        self.assertRaises(errors.OutSideTransaction,
+                          f.add_lines_with_ghosts, b'', [], [])
 
     def test_copy_to(self):
         f = self.get_file()
-        f.add_lines('0', [], ['a\n'])
+        f.add_lines(b'0', [], [b'a\n'])
         t = MemoryTransport()
         f.copy_to('foo', t)
         for suffix in self.get_factory().get_suffixes():
@@ -667,37 +587,37 @@
 
     def test_get_parent_map(self):
         f = self.get_file()
-        f.add_lines('r0', [], ['a\n', 'b\n'])
-        self.assertEqual(
-            {'r0':()}, f.get_parent_map(['r0']))
-        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
-        self.assertEqual(
-            {'r1':('r0',)}, f.get_parent_map(['r1']))
-        self.assertEqual(
-            {'r0':(),
-             'r1':('r0',)},
-            f.get_parent_map(['r0', 'r1']))
-        f.add_lines('r2', [], ['a\n', 'b\n'])
-        f.add_lines('r3', [], ['a\n', 'b\n'])
-        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
-        self.assertEqual(
-            {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
-        self.assertEqual({}, f.get_parent_map('y'))
-        self.assertEqual(
-            {'r0':(),
-             'r1':('r0',)},
-            f.get_parent_map(['r0', 'y', 'r1']))
+        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
+        self.assertEqual(
+            {b'r0': ()}, f.get_parent_map([b'r0']))
+        f.add_lines(b'r1', [b'r0'], [b'a\n', b'b\n'])
+        self.assertEqual(
+            {b'r1': (b'r0',)}, f.get_parent_map([b'r1']))
+        self.assertEqual(
+            {b'r0': (),
+             b'r1': (b'r0',)},
+            f.get_parent_map([b'r0', b'r1']))
+        f.add_lines(b'r2', [], [b'a\n', b'b\n'])
+        f.add_lines(b'r3', [], [b'a\n', b'b\n'])
+        f.add_lines(b'm', [b'r0', b'r1', b'r2', b'r3'], [b'a\n', b'b\n'])
+        self.assertEqual(
+            {b'm': (b'r0', b'r1', b'r2', b'r3')}, f.get_parent_map([b'm']))
+        self.assertEqual({}, f.get_parent_map(b'y'))
+        self.assertEqual(
+            {b'r0': (),
+             b'r1': (b'r0',)},
+            f.get_parent_map([b'r0', b'y', b'r1']))
 
     def test_annotate(self):
         f = self.get_file()
-        f.add_lines('r0', [], ['a\n', 'b\n'])
-        f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
-        origins = f.annotate('r1')
-        self.assertEquals(origins[0][0], 'r1')
-        self.assertEquals(origins[1][0], 'r0')
+        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
+        f.add_lines(b'r1', [b'r0'], [b'c\n', b'b\n'])
+        origins = f.annotate(b'r1')
+        self.assertEqual(origins[0][0], b'r1')
+        self.assertEqual(origins[1][0], b'r0')
 
         self.assertRaises(RevisionNotPresent,
-            f.annotate, 'foo')
+                          f.annotate, b'foo')
 
     def test_detection(self):
         # Test weaves detect corruption.
@@ -708,17 +628,17 @@
 
         w = self.get_file_corrupted_text()
 
-        self.assertEqual('hello\n', w.get_text('v1'))
-        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
-        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
-        self.assertRaises(errors.WeaveInvalidChecksum, w.check)
+        self.assertEqual(b'hello\n', w.get_text(b'v1'))
+        self.assertRaises(WeaveInvalidChecksum, w.get_text, b'v2')
+        self.assertRaises(WeaveInvalidChecksum, w.get_lines, b'v2')
+        self.assertRaises(WeaveInvalidChecksum, w.check)
 
         w = self.get_file_corrupted_checksum()
 
-        self.assertEqual('hello\n', w.get_text('v1'))
-        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
-        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
-        self.assertRaises(errors.WeaveInvalidChecksum, w.check)
+        self.assertEqual(b'hello\n', w.get_text(b'v1'))
+        self.assertRaises(WeaveInvalidChecksum, w.get_text, b'v2')
+        self.assertRaises(WeaveInvalidChecksum, w.get_lines, b'v2')
+        self.assertRaises(WeaveInvalidChecksum, w.check)
 
     def get_file_corrupted_text(self):
         """Return a versioned file with corrupt text but valid metadata."""
@@ -745,36 +665,37 @@
 
         vf = self.get_file()
         # add a base to get included
-        vf.add_lines('base', [], ['base\n'])
+        vf.add_lines(b'base', [], [b'base\n'])
         # add a ancestor to be included on one side
-        vf.add_lines('lancestor', [], ['lancestor\n'])
+        vf.add_lines(b'lancestor', [], [b'lancestor\n'])
         # add a ancestor to be included on the other side
-        vf.add_lines('rancestor', ['base'], ['rancestor\n'])
+        vf.add_lines(b'rancestor', [b'base'], [b'rancestor\n'])
         # add a child of rancestor with no eofile-nl
-        vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
+        vf.add_lines(b'child', [b'rancestor'], [b'base\n', b'child\n'])
         # add a child of lancestor and base to join the two roots
-        vf.add_lines('otherchild',
-                     ['lancestor', 'base'],
-                     ['base\n', 'lancestor\n', 'otherchild\n'])
+        vf.add_lines(b'otherchild',
+                     [b'lancestor', b'base'],
+                     [b'base\n', b'lancestor\n', b'otherchild\n'])
+
         def iter_with_versions(versions, expected):
             # now we need to see what lines are returned, and how often.
             lines = {}
             progress = InstrumentedProgress()
             # iterate over the lines
             for line in vf.iter_lines_added_or_present_in_versions(versions,
-                pb=progress):
+                                                                   pb=progress):
                 lines.setdefault(line, 0)
                 lines[line] += 1
-            if []!= progress.updates:
+            if [] != progress.updates:
                 self.assertEqual(expected, progress.updates)
             return lines
-        lines = iter_with_versions(['child', 'otherchild'],
+        lines = iter_with_versions([b'child', b'otherchild'],
                                    [('Walking content', 0, 2),
                                     ('Walking content', 1, 2),
                                     ('Walking content', 2, 2)])
         # we must see child and otherchild
-        self.assertTrue(lines[('child\n', 'child')] > 0)
-        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
+        self.assertTrue(lines[(b'child\n', b'child')] > 0)
+        self.assertTrue(lines[(b'otherchild\n', b'otherchild')] > 0)
         # we dont care if we got more than that.
 
         # test all lines
@@ -785,11 +706,11 @@
                                           ('Walking content', 4, 5),
                                           ('Walking content', 5, 5)])
         # all lines must be seen at least once
-        self.assertTrue(lines[('base\n', 'base')] > 0)
-        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
-        self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
-        self.assertTrue(lines[('child\n', 'child')] > 0)
-        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
+        self.assertTrue(lines[(b'base\n', b'base')] > 0)
+        self.assertTrue(lines[(b'lancestor\n', b'lancestor')] > 0)
+        self.assertTrue(lines[(b'rancestor\n', b'rancestor')] > 0)
+        self.assertTrue(lines[(b'child\n', b'child')] > 0)
+        self.assertTrue(lines[(b'otherchild\n', b'otherchild')] > 0)
 
     def test_add_lines_with_ghosts(self):
         # some versioned file formats allow lines to be added with parent
@@ -802,32 +723,38 @@
         parent_id_unicode = u'b\xbfse'
         parent_id_utf8 = parent_id_unicode.encode('utf8')
         try:
-            vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
+            vf.add_lines_with_ghosts(b'notbxbfse', [parent_id_utf8], [])
         except NotImplementedError:
             # check the other ghost apis are also not implemented
-            self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
-            self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
+            self.assertRaises(NotImplementedError,
+                              vf.get_ancestry_with_ghosts, [b'foo'])
+            self.assertRaises(NotImplementedError,
+                              vf.get_parents_with_ghosts, b'foo')
             return
         vf = self.reopen_file()
         # test key graph related apis: getncestry, _graph, get_parents
         # has_version
         # - these are ghost unaware and must not be reflect ghosts
-        self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
+        self.assertEqual([b'notbxbfse'], vf.get_ancestry(b'notbxbfse'))
         self.assertFalse(vf.has_version(parent_id_utf8))
         # we have _with_ghost apis to give us ghost information.
-        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
-        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
+        self.assertEqual([parent_id_utf8, b'notbxbfse'],
+                         vf.get_ancestry_with_ghosts([b'notbxbfse']))
+        self.assertEqual([parent_id_utf8],
+                         vf.get_parents_with_ghosts(b'notbxbfse'))
         # if we add something that is a ghost of another, it should correct the
         # results of the prior apis
         vf.add_lines(parent_id_utf8, [], [])
-        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
-        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
-            vf.get_parent_map(['notbxbfse']))
+        self.assertEqual([parent_id_utf8, b'notbxbfse'],
+                         vf.get_ancestry([b'notbxbfse']))
+        self.assertEqual({b'notbxbfse': (parent_id_utf8,)},
+                         vf.get_parent_map([b'notbxbfse']))
         self.assertTrue(vf.has_version(parent_id_utf8))
         # we have _with_ghost apis to give us ghost information.
-        self.assertEqual([parent_id_utf8, 'notbxbfse'],
-            vf.get_ancestry_with_ghosts(['notbxbfse']))
-        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
+        self.assertEqual([parent_id_utf8, b'notbxbfse'],
+                         vf.get_ancestry_with_ghosts([b'notbxbfse']))
+        self.assertEqual([parent_id_utf8],
+                         vf.get_parents_with_ghosts(b'notbxbfse'))
 
     def test_add_lines_with_ghosts_after_normal_revs(self):
         # some versioned file formats allow lines to be added with parent
@@ -837,26 +764,26 @@
         vf = self.get_file()
         # probe for ghost support
         try:
-            vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
+            vf.add_lines_with_ghosts(b'base', [], [b'line\n', b'line_b\n'])
         except NotImplementedError:
             return
-        vf.add_lines_with_ghosts('references_ghost',
-                                 ['base', 'a_ghost'],
-                                 ['line\n', 'line_b\n', 'line_c\n'])
-        origins = vf.annotate('references_ghost')
-        self.assertEquals(('base', 'line\n'), origins[0])
-        self.assertEquals(('base', 'line_b\n'), origins[1])
-        self.assertEquals(('references_ghost', 'line_c\n'), origins[2])
+        vf.add_lines_with_ghosts(b'references_ghost',
+                                 [b'base', b'a_ghost'],
+                                 [b'line\n', b'line_b\n', b'line_c\n'])
+        origins = vf.annotate(b'references_ghost')
+        self.assertEqual((b'base', b'line\n'), origins[0])
+        self.assertEqual((b'base', b'line_b\n'), origins[1])
+        self.assertEqual((b'references_ghost', b'line_c\n'), origins[2])
 
     def test_readonly_mode(self):
-        transport = get_transport(self.get_url('.'))
+        t = self.get_transport()
         factory = self.get_factory()
-        vf = factory('id', transport, 0777, create=True, access_mode='w')
-        vf = factory('id', transport, access_mode='r')
-        self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
+        vf = factory('id', t, 0o777, create=True, access_mode='w')
+        vf = factory('id', t, access_mode='r')
+        self.assertRaises(errors.ReadOnlyError, vf.add_lines, b'base', [], [])
         self.assertRaises(errors.ReadOnlyError,
                           vf.add_lines_with_ghosts,
-                          'base',
+                          b'base',
                           [],
                           [])
 
864
791
        # check the sha1 data is available
865
792
        vf = self.get_file()
866
793
        # a simple file
867
 
        vf.add_lines('a', [], ['a\n'])
 
794
        vf.add_lines(b'a', [], [b'a\n'])
868
795
        # the same file, different metadata
869
 
        vf.add_lines('b', ['a'], ['a\n'])
 
796
        vf.add_lines(b'b', [b'a'], [b'a\n'])
870
797
        # a file differing only in last newline.
871
 
        vf.add_lines('c', [], ['a'])
 
798
        vf.add_lines(b'c', [], [b'a'])
872
799
        self.assertEqual({
873
 
            'a': '3f786850e387550fdab836ed7e6dc881de23001b',
874
 
            'c': '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
875
 
            'b': '3f786850e387550fdab836ed7e6dc881de23001b',
 
800
            b'a': b'3f786850e387550fdab836ed7e6dc881de23001b',
 
801
            b'c': b'86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
 
802
            b'b': b'3f786850e387550fdab836ed7e6dc881de23001b',
876
803
            },
877
 
            vf.get_sha1s(['a', 'c', 'b']))
 
804
            vf.get_sha1s([b'a', b'c', b'b']))
878
805
 
879
806
 
 class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):

     def get_file(self, name='foo'):
-        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
-            get_scope=self.get_transaction)
+        return WeaveFile(name, self.get_transport(),
+                         create=True,
+                         get_scope=self.get_transaction)

     def get_file_corrupted_text(self):
-        w = WeaveFile('foo', get_transport(self.get_url('.')), create=True,
-            get_scope=self.get_transaction)
-        w.add_lines('v1', [], ['hello\n'])
-        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
+        w = WeaveFile('foo', self.get_transport(),
+                      create=True,
+                      get_scope=self.get_transaction)
+        w.add_lines(b'v1', [], [b'hello\n'])
+        w.add_lines(b'v2', [b'v1'], [b'hello\n', b'there\n'])

         # We are going to invasively corrupt the text
         # Make sure the internals of weave are the same
-        self.assertEqual([('{', 0)
-                        , 'hello\n'
-                        , ('}', None)
-                        , ('{', 1)
-                        , 'there\n'
-                        , ('}', None)
-                        ], w._weave)
+        self.assertEqual([(b'{', 0), b'hello\n', (b'}', None), (b'{', 1), b'there\n', (b'}', None)
+                          ], w._weave)

-        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
-                        , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
-                        ], w._sha1s)
+        self.assertEqual([b'f572d396fae9206628714fb2ce00f72e94f2258f', b'90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
+                          ], w._sha1s)
         w.check()

         # Corrupted
-        w._weave[4] = 'There\n'
+        w._weave[4] = b'There\n'
         return w

     def get_file_corrupted_checksum(self):
         w = self.get_file_corrupted_text()
         # Corrected
-        w._weave[4] = 'there\n'
-        self.assertEqual('hello\nthere\n', w.get_text('v2'))
+        w._weave[4] = b'there\n'
+        self.assertEqual(b'hello\nthere\n', w.get_text(b'v2'))

-        #Invalid checksum, first digit changed
-        w._sha1s[1] =  'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
+        # Invalid checksum, first digit changed
+        w._sha1s[1] = b'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
         return w

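The w._sha1s list holds the same kind of digest, one per version's full text, which is why the corrupted-checksum fixture only has to flip the first hex digit of the second entry. A stdlib sketch of what the fixture relies on, assuming (as the weave checker requires) that _sha1s holds the SHA-1 of each version's full text:

    import hashlib

    assert hashlib.sha1(b'hello\n').hexdigest() == \
        'f572d396fae9206628714fb2ce00f72e94f2258f'   # v1
    assert hashlib.sha1(b'hello\nthere\n').hexdigest() == \
        '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'   # v2, per the fixture
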
     def reopen_file(self, name='foo', create=False):
-        return WeaveFile(name, get_transport(self.get_url('.')), create=create,
-            get_scope=self.get_transaction)
+        return WeaveFile(name, self.get_transport(),
+                         create=create,
+                         get_scope=self.get_transaction)

     def test_no_implicit_create(self):
         self.assertRaises(errors.NoSuchFile,
                           WeaveFile,
                           'foo',
-                          get_transport(self.get_url('.')),
+                          self.get_transport(),
                           get_scope=self.get_transaction)

     def get_factory(self):
...
 class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):

     def setUp(self):
-        TestCaseWithMemoryTransport.setUp(self)
+        super(TestPlanMergeVersionedFile, self).setUp()
         mapper = PrefixMapper()
         factory = make_file_factory(True, mapper)
         self.vf1 = factory(self.get_transport('root-1'))
...
         self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])

     def test_add_lines(self):
-        self.plan_merge_vf.add_lines(('root', 'a:'), [], [])
-        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
-            ('root', 'a'), [], [])
-        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
-            ('root', 'a:'), None, [])
-        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
-            ('root', 'a:'), [], None)
+        self.plan_merge_vf.add_lines((b'root', b'a:'), [], [])
+        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
+                          (b'root', b'a'), [], [])
+        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
+                          (b'root', b'a:'), None, [])
+        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
+                          (b'root', b'a:'), [], None)

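The assertions above imply the contract of the plan-merge file's add_lines: only temporary keys whose last element ends with b':' may be added, and parents and lines must be real sequences. A hedged sketch of that validation (illustrative only, not the breezy implementation):

    def check_add_lines(key, parents, lines):
        # Constraints implied by the test; names here are hypothetical.
        if not key[-1].endswith(b':'):
            raise ValueError('only reserved ids ending in b":" may be added')
        if parents is None or lines is None:
            raise ValueError('parents and lines must be sequences, not None')
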
     def setup_abcde(self):
-        self.vf1.add_lines(('root', 'A'), [], ['a'])
-        self.vf1.add_lines(('root', 'B'), [('root', 'A')], ['b'])
-        self.vf2.add_lines(('root', 'C'), [], ['c'])
-        self.vf2.add_lines(('root', 'D'), [('root', 'C')], ['d'])
-        self.plan_merge_vf.add_lines(('root', 'E:'),
-            [('root', 'B'), ('root', 'D')], ['e'])
+        self.vf1.add_lines((b'root', b'A'), [], [b'a'])
+        self.vf1.add_lines((b'root', b'B'), [(b'root', b'A')], [b'b'])
+        self.vf2.add_lines((b'root', b'C'), [], [b'c'])
+        self.vf2.add_lines((b'root', b'D'), [(b'root', b'C')], [b'd'])
+        self.plan_merge_vf.add_lines((b'root', b'E:'),
+                                     [(b'root', b'B'), (b'root', b'D')], [b'e'])

     def test_get_parents(self):
         self.setup_abcde()
-        self.assertEqual({('root', 'B'):(('root', 'A'),)},
-            self.plan_merge_vf.get_parent_map([('root', 'B')]))
-        self.assertEqual({('root', 'D'):(('root', 'C'),)},
-            self.plan_merge_vf.get_parent_map([('root', 'D')]))
-        self.assertEqual({('root', 'E:'):(('root', 'B'),('root', 'D'))},
-            self.plan_merge_vf.get_parent_map([('root', 'E:')]))
+        self.assertEqual({(b'root', b'B'): ((b'root', b'A'),)},
+                         self.plan_merge_vf.get_parent_map([(b'root', b'B')]))
+        self.assertEqual({(b'root', b'D'): ((b'root', b'C'),)},
+                         self.plan_merge_vf.get_parent_map([(b'root', b'D')]))
+        self.assertEqual({(b'root', b'E:'): ((b'root', b'B'), (b'root', b'D'))},
+                         self.plan_merge_vf.get_parent_map([(b'root', b'E:')]))
         self.assertEqual({},
-            self.plan_merge_vf.get_parent_map([('root', 'F')]))
+                         self.plan_merge_vf.get_parent_map([(b'root', b'F')]))
         self.assertEqual({
-                ('root', 'B'):(('root', 'A'),),
-                ('root', 'D'):(('root', 'C'),),
-                ('root', 'E:'):(('root', 'B'),('root', 'D')),
-                },
+            (b'root', b'B'): ((b'root', b'A'),),
+            (b'root', b'D'): ((b'root', b'C'),),
+            (b'root', b'E:'): ((b'root', b'B'), (b'root', b'D')),
+            },
             self.plan_merge_vf.get_parent_map(
-                [('root', 'B'), ('root', 'D'), ('root', 'E:'), ('root', 'F')]))
+                [(b'root', b'B'), (b'root', b'D'), (b'root', b'E:'), (b'root', b'F')]))

     def test_get_record_stream(self):
         self.setup_abcde()
+
         def get_record(suffix):
-            return self.plan_merge_vf.get_record_stream(
-                [('root', suffix)], 'unordered', True).next()
-        self.assertEqual('a', get_record('A').get_bytes_as('fulltext'))
-        self.assertEqual('c', get_record('C').get_bytes_as('fulltext'))
-        self.assertEqual('e', get_record('E:').get_bytes_as('fulltext'))
+            return next(self.plan_merge_vf.get_record_stream(
+                [(b'root', suffix)], 'unordered', True))
+        self.assertEqual(b'a', get_record(b'A').get_bytes_as('fulltext'))
+        self.assertEqual(b'a', b''.join(get_record(b'A').iter_bytes_as('chunked')))
+        self.assertEqual(b'c', get_record(b'C').get_bytes_as('fulltext'))
+        self.assertEqual(b'e', get_record(b'E:').get_bytes_as('fulltext'))
         self.assertEqual('absent', get_record('F').storage_kind)

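The get_record change is one instance of a pattern repeated throughout this file: Python 3 iterators have no .next() method, so every consumer of get_record_stream() switches to the next() builtin, which works on both Python 2 and 3:

    stream = iter([b'record-a', b'record-b'])  # stands in for a record stream
    first = next(stream)     # portable spelling
    # first = stream.next()  # Python 2 only; AttributeError on Python 3
    assert first == b'record-a'
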
...
         # we should be able to read from http with a versioned file.
         vf = self.get_file()
         # try an empty file access
-        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
+        readonly_vf = self.get_factory()('foo',
+                                         transport.get_transport_from_url(self.get_readonly_url('.')))
         self.assertEqual([], readonly_vf.versions())
+
+    def test_readonly_http_works_with_feeling(self):
+        # we should be able to read from http with a versioned file.
+        vf = self.get_file()
         # now with feeling.
-        vf.add_lines('1', [], ['a\n'])
-        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
-        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
-        self.assertEqual(['1', '2'], vf.versions())
+        vf.add_lines(b'1', [], [b'a\n'])
+        vf.add_lines(b'2', [b'1'], [b'b\n', b'a\n'])
+        readonly_vf = self.get_factory()('foo',
+                                         transport.get_transport_from_url(self.get_readonly_url('.')))
+        self.assertEqual([b'1', b'2'], vf.versions())
+        self.assertEqual([b'1', b'2'], readonly_vf.versions())
         for version in readonly_vf.versions():
             readonly_vf.get_lines(version)

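The old get_transport(self.get_url('.')) idiom is replaced throughout: local access uses the test's own self.get_transport(), and URL access goes through transport.get_transport_from_url(), already imported at the top of this module. Minimal usage (the URL is a placeholder for illustration):

    from breezy import transport

    t = transport.get_transport_from_url('http://example.com/repo')
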
946
class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):
1014
947
 
1015
948
    def get_file(self):
1016
 
        return WeaveFile('foo', get_transport(self.get_url('.')), create=True,
1017
 
            get_scope=self.get_transaction)
 
949
        return WeaveFile('foo', self.get_transport(),
 
950
                         create=True,
 
951
                         get_scope=self.get_transaction)
1018
952
 
1019
953
    def get_factory(self):
1020
954
        return WeaveFile
1023
957
class MergeCasesMixin(object):
1024
958
 
1025
959
    def doMerge(self, base, a, b, mp):
1026
 
        from cStringIO import StringIO
1027
960
        from textwrap import dedent
1028
961
 
1029
962
        def addcrlf(x):
1030
 
            return x + '\n'
 
963
            return x + b'\n'
1031
964
 
1032
965
        w = self.get_file()
1033
 
        w.add_lines('text0', [], map(addcrlf, base))
1034
 
        w.add_lines('text1', ['text0'], map(addcrlf, a))
1035
 
        w.add_lines('text2', ['text0'], map(addcrlf, b))
 
966
        w.add_lines(b'text0', [], list(map(addcrlf, base)))
 
967
        w.add_lines(b'text1', [b'text0'], list(map(addcrlf, a)))
 
968
        w.add_lines(b'text2', [b'text0'], list(map(addcrlf, b)))
1036
969
 
1037
970
        self.log_contents(w)
1038
971
 
1039
972
        self.log('merge plan:')
1040
 
        p = list(w.plan_merge('text1', 'text2'))
 
973
        p = list(w.plan_merge(b'text1', b'text2'))
1041
974
        for state, line in p:
1042
975
            if line:
1043
976
                self.log('%12s | %s' % (state, line[:-1]))
1044
977
 
1045
978
        self.log('merge:')
1046
 
        mt = StringIO()
 
979
        mt = BytesIO()
1047
980
        mt.writelines(w.weave_merge(p))
1048
981
        mt.seek(0)
1049
982
        self.log(mt.getvalue())
1050
983
 
1051
 
        mp = map(addcrlf, mp)
 
984
        mp = list(map(addcrlf, mp))
1052
985
        self.assertEqual(mt.readlines(), mp)
1053
986
 
1054
 
 
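doMerge also picks up the other ubiquitous Python 3 fix: map() now returns a single-pass iterator instead of a list, so results that are reused or compared against lists are materialised with list(...):

    def addcrlf(x):
        return x + b'\n'

    lines = map(addcrlf, [b'aa', b'bb'])
    assert list(lines) == [b'aa\n', b'bb\n']
    # A second list(lines) would be []: the iterator is exhausted,
    # which is why doMerge wraps the map() results eagerly.
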
     def testOneInsert(self):
         self.doMerge([],
-                     ['aa'],
+                     [b'aa'],
                      [],
-                     ['aa'])
+                     [b'aa'])

     def testSeparateInserts(self):
-        self.doMerge(['aaa', 'bbb', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'ccc'],
-                     ['aaa', 'bbb', 'yyy', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
+        self.doMerge([b'aaa', b'bbb', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'ccc'],
+                     [b'aaa', b'bbb', b'yyy', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'])

     def testSameInsert(self):
-        self.doMerge(['aaa', 'bbb', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
-                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
-    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']
+        self.doMerge([b'aaa', b'bbb', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'],
+                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'])
+    overlappedInsertExpected = [b'aaa', b'xxx', b'yyy', b'bbb']
+
     def testOverlappedInsert(self):
-        self.doMerge(['aaa', 'bbb'],
-                     ['aaa', 'xxx', 'yyy', 'bbb'],
-                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)
+        self.doMerge([b'aaa', b'bbb'],
+                     [b'aaa', b'xxx', b'yyy', b'bbb'],
+                     [b'aaa', b'xxx', b'bbb'], self.overlappedInsertExpected)

         # really it ought to reduce this to
-        # ['aaa', 'xxx', 'yyy', 'bbb']
-
+        # [b'aaa', b'xxx', b'yyy', b'bbb']

     def testClashReplace(self):
-        self.doMerge(['aaa'],
-                     ['xxx'],
-                     ['yyy', 'zzz'],
-                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
-                      '>>>>>>> '])
+        self.doMerge([b'aaa'],
+                     [b'xxx'],
+                     [b'yyy', b'zzz'],
+                     [b'<<<<<<< ', b'xxx', b'=======', b'yyy', b'zzz',
+                      b'>>>>>>> '])

     def testNonClashInsert1(self):
-        self.doMerge(['aaa'],
-                     ['xxx', 'aaa'],
-                     ['yyy', 'zzz'],
-                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
-                      '>>>>>>> '])
+        self.doMerge([b'aaa'],
+                     [b'xxx', b'aaa'],
+                     [b'yyy', b'zzz'],
+                     [b'<<<<<<< ', b'xxx', b'aaa', b'=======', b'yyy', b'zzz',
+                      b'>>>>>>> '])

     def testNonClashInsert2(self):
-        self.doMerge(['aaa'],
-                     ['aaa'],
-                     ['yyy', 'zzz'],
-                     ['yyy', 'zzz'])
-
+        self.doMerge([b'aaa'],
+                     [b'aaa'],
+                     [b'yyy', b'zzz'],
+                     [b'yyy', b'zzz'])

     def testDeleteAndModify(self):
         """Clashing delete and modification.
...
         # skipped, not working yet
         return

-        self.doMerge(['aaa', 'bbb', 'ccc'],
-                     ['aaa', 'ddd', 'ccc'],
-                     ['aaa', 'ccc'],
-                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])
+        self.doMerge([b'aaa', b'bbb', b'ccc'],
+                     [b'aaa', b'ddd', b'ccc'],
+                     [b'aaa', b'ccc'],
+                     [b'<<<<<<<< ', b'aaa', b'=======', b'>>>>>>> ', b'ccc'])

     def _test_merge_from_strings(self, base, a, b, expected):
         w = self.get_file()
-        w.add_lines('text0', [], base.splitlines(True))
-        w.add_lines('text1', ['text0'], a.splitlines(True))
-        w.add_lines('text2', ['text0'], b.splitlines(True))
+        w.add_lines(b'text0', [], base.splitlines(True))
+        w.add_lines(b'text1', [b'text0'], a.splitlines(True))
+        w.add_lines(b'text2', [b'text0'], b.splitlines(True))
         self.log('merge plan:')
-        p = list(w.plan_merge('text1', 'text2'))
+        p = list(w.plan_merge(b'text1', b'text2'))
         for state, line in p:
             if line:
                 self.log('%12s | %s' % (state, line[:-1]))
         self.log('merge result:')
-        result_text = ''.join(w.weave_merge(p))
+        result_text = b''.join(w.weave_merge(p))
         self.log(result_text)
         self.assertEqualDiff(result_text, expected)

     def test_weave_merge_conflicts(self):
         # does weave merge properly handle plans that end with unchanged?
-        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
-        self.assertEqual(result, 'hello\n')
+        result = b''.join(self.get_file().weave_merge([('new-a', b'hello\n')]))
+        self.assertEqual(result, b'hello\n')

     def test_deletion_extended(self):
         """One side deletes, the other deletes more.
         """
-        base = """\
+        base = b"""\
             line 1
             line 2
             line 3
             """
-        a = """\
+        a = b"""\
             line 1
             line 2
             """
-        b = """\
+        b = b"""\
             line 1
             """
-        result = """\
+        result = b"""\
             line 1
 <<<<<<<\x20
             line 2
...
         Arguably it'd be better to treat these as agreement, rather than
         conflict, but for now conflict is safer.
         """
-        base = """\
+        base = b"""\
             start context
             int a() {}
             int b() {}
             int c() {}
             end context
             """
-        a = """\
+        a = b"""\
             start context
             int a() {}
             end context
             """
-        b = """\
+        b = b"""\
             start context
             int c() {}
             end context
             """
-        result = """\
+        result = b"""\
             start context
 <<<<<<<\x20
             int a() {}
...

     def test_agreement_deletion(self):
         """Agree to delete some lines, without conflicts."""
-        base = """\
+        base = b"""\
             start context
             base line 1
             base line 2
             end context
             """
-        a = """\
-            start context
-            base line 1
-            end context
-            """
-        b = """\
-            start context
-            base line 1
-            end context
-            """
-        result = """\
+        a = b"""\
+            start context
+            base line 1
+            end context
+            """
+        b = b"""\
+            start context
+            base line 1
+            end context
+            """
+        result = b"""\
             start context
             base line 1
             end context
...

         It's better to consider the whole thing as a disagreement region.
         """
-        base = """\
+        base = b"""\
             start context
             base line 1
             base line 2
             end context
             """
-        a = """\
+        a = b"""\
             start context
             base line 1
             a's replacement line 2
             end context
             """
-        b = """\
+        b = b"""\
             start context
             b replaces
             both lines
             end context
             """
-        result = """\
+        result = b"""\
             start context
 <<<<<<<\x20
             base line 1
...
 class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):

     def get_file(self, name='foo'):
-        return WeaveFile(name, get_transport(self.get_url('.')), create=True)
+        return WeaveFile(name, self.get_transport(),
+                         create=True)

     def log_contents(self, w):
         self.log('weave is:')
-        tmpf = StringIO()
+        tmpf = BytesIO()
         write_weave(w, tmpf)
         self.log(tmpf.getvalue())

-    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
-                                'xxx', '>>>>>>> ', 'bbb']
+    overlappedInsertExpected = [b'aaa', b'<<<<<<< ', b'xxx', b'yyy', b'=======',
+                                b'xxx', b'>>>>>>> ', b'bbb']


 class TestContentFactoryAdaption(TestCaseWithMemoryTransport):
...
         # Each is source_kind, requested_kind, adapter class
         scenarios = [
             ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
+            ('knit-delta-gz', 'lines', _mod_knit.DeltaPlainToFullText),
+            ('knit-delta-gz', 'chunked', _mod_knit.DeltaPlainToFullText),
             ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
+            ('knit-ft-gz', 'lines', _mod_knit.FTPlainToFullText),
+            ('knit-ft-gz', 'chunked', _mod_knit.FTPlainToFullText),
             ('knit-annotated-delta-gz', 'knit-delta-gz',
                 _mod_knit.DeltaAnnotatedToUnannotated),
             ('knit-annotated-delta-gz', 'fulltext',
...
                 _mod_knit.FTAnnotatedToUnannotated),
             ('knit-annotated-ft-gz', 'fulltext',
                 _mod_knit.FTAnnotatedToFullText),
+            ('knit-annotated-ft-gz', 'lines',
+                _mod_knit.FTAnnotatedToFullText),
+            ('knit-annotated-ft-gz', 'chunked',
+                _mod_knit.FTAnnotatedToFullText),
             ]
         for source, requested, klass in scenarios:
             adapter_factory = versionedfile.adapter_registry.get(
...
         transport = self.get_transport()
         return make_file_factory(annotated, mapper)(transport)

-    def helpGetBytes(self, f, ft_adapter, delta_adapter):
+    def helpGetBytes(self, f, ft_name, ft_adapter, delta_name, delta_adapter):
         """Grab the interested adapted texts for tests."""
         # origin is a fulltext
-        entries = f.get_record_stream([('origin',)], 'unordered', False)
-        base = entries.next()
-        ft_data = ft_adapter.get_bytes(base)
+        entries = f.get_record_stream([(b'origin',)], 'unordered', False)
+        base = next(entries)
+        ft_data = ft_adapter.get_bytes(base, ft_name)
         # merged is both a delta and multiple parents.
-        entries = f.get_record_stream([('merged',)], 'unordered', False)
-        merged = entries.next()
-        delta_data = delta_adapter.get_bytes(merged)
+        entries = f.get_record_stream([(b'merged',)], 'unordered', False)
+        merged = next(entries)
+        delta_data = delta_adapter.get_bytes(merged, delta_name)
         return ft_data, delta_data

     def test_deannotation_noeol(self):
...
         # we need a full text, and a delta
         f = self.get_knit()
         get_diamond_files(f, 1, trailing_eol=False)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTAnnotatedToUnannotated(None),
-            _mod_knit.DeltaAnnotatedToUnannotated(None))
-        self.assertEqual(
-            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
-            'origin\n'
-            'end origin\n',
-            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
-        self.assertEqual(
-            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
-            '1,2,3\nleft\nright\nmerged\nend merged\n',
-            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'knit-ft-gz', _mod_knit.FTAnnotatedToUnannotated(None),
+            'knit-delta-gz', _mod_knit.DeltaAnnotatedToUnannotated(None))
+        self.assertEqual(
+            b'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
+            b'origin\n'
+            b'end origin\n',
+            GzipFile(mode='rb', fileobj=BytesIO(ft_data)).read())
+        self.assertEqual(
+            b'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
+            b'1,2,3\nleft\nright\nmerged\nend merged\n',
+            GzipFile(mode='rb', fileobj=BytesIO(delta_data)).read())

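Two threads run through these adapter hunks. First, helpGetBytes now passes the target storage kind ('knit-ft-gz', 'knit-delta-gz', 'fulltext') explicitly, matching the two-argument adapter.get_bytes(record, target) calls. Second, the adapters emit gzip-compressed knit hunks, so the assertions decompress with GzipFile over a BytesIO; on Python 3 a StringIO cannot back GzipFile because compressed data is bytes. The decode step in isolation, stdlib only:

    import gzip
    from io import BytesIO

    buf = BytesIO()
    with gzip.GzipFile(mode='wb', fileobj=buf) as gz:
        gz.write(b'version origin 1 ...\n')  # placeholder for a knit hunk
    rt = gzip.GzipFile(mode='rb', fileobj=BytesIO(buf.getvalue())).read()
    assert rt == b'version origin 1 ...\n'
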
     def test_deannotation(self):
         """Test converting annotated knits to unannotated knits."""
         # we need a full text, and a delta
         f = self.get_knit()
         get_diamond_files(f, 1)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTAnnotatedToUnannotated(None),
-            _mod_knit.DeltaAnnotatedToUnannotated(None))
-        self.assertEqual(
-            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
-            'origin\n'
-            'end origin\n',
-            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
-        self.assertEqual(
-            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
-            '2,2,2\nright\nmerged\nend merged\n',
-            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'knit-ft-gz', _mod_knit.FTAnnotatedToUnannotated(None),
+            'knit-delta-gz', _mod_knit.DeltaAnnotatedToUnannotated(None))
+        self.assertEqual(
+            b'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
+            b'origin\n'
+            b'end origin\n',
+            GzipFile(mode='rb', fileobj=BytesIO(ft_data)).read())
+        self.assertEqual(
+            b'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
+            b'2,2,2\nright\nmerged\nend merged\n',
+            GzipFile(mode='rb', fileobj=BytesIO(delta_data)).read())

     def test_annotated_to_fulltext_no_eol(self):
         """Test adapting annotated knits to full texts (for -> weaves)."""
...
         # Reconstructing a full text requires a backing versioned file, and it
         # must have the base lines requested from it.
         logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTAnnotatedToFullText(None),
-            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
-        self.assertEqual('origin', ft_data)
-        self.assertEqual('base\nleft\nright\nmerged', delta_data)
-        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
-            True)], logged_vf.calls)
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'fulltext', _mod_knit.FTAnnotatedToFullText(None),
+            'fulltext', _mod_knit.DeltaAnnotatedToFullText(logged_vf))
+        self.assertEqual(b'origin', ft_data)
+        self.assertEqual(b'base\nleft\nright\nmerged', delta_data)
+        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
+                           True)], logged_vf.calls)

     def test_annotated_to_fulltext(self):
         """Test adapting annotated knits to full texts (for -> weaves)."""
...
         # Reconstructing a full text requires a backing versioned file, and it
         # must have the base lines requested from it.
         logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTAnnotatedToFullText(None),
-            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
-        self.assertEqual('origin\n', ft_data)
-        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
-        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
-            True)], logged_vf.calls)
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'fulltext', _mod_knit.FTAnnotatedToFullText(None),
+            'fulltext', _mod_knit.DeltaAnnotatedToFullText(logged_vf))
+        self.assertEqual(b'origin\n', ft_data)
+        self.assertEqual(b'base\nleft\nright\nmerged\n', delta_data)
+        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
+                           True)], logged_vf.calls)

     def test_unannotated_to_fulltext(self):
         """Test adapting unannotated knits to full texts.
...
         # Reconstructing a full text requires a backing versioned file, and it
         # must have the base lines requested from it.
         logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTPlainToFullText(None),
-            _mod_knit.DeltaPlainToFullText(logged_vf))
-        self.assertEqual('origin\n', ft_data)
-        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
-        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
-            True)], logged_vf.calls)
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'fulltext', _mod_knit.FTPlainToFullText(None),
+            'fulltext', _mod_knit.DeltaPlainToFullText(logged_vf))
+        self.assertEqual(b'origin\n', ft_data)
+        self.assertEqual(b'base\nleft\nright\nmerged\n', delta_data)
+        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
+                           True)], logged_vf.calls)

     def test_unannotated_to_fulltext_no_eol(self):
         """Test adapting unannotated knits to full texts.
...
         # Reconstructing a full text requires a backing versioned file, and it
         # must have the base lines requested from it.
         logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
-        ft_data, delta_data = self.helpGetBytes(f,
-            _mod_knit.FTPlainToFullText(None),
-            _mod_knit.DeltaPlainToFullText(logged_vf))
-        self.assertEqual('origin', ft_data)
-        self.assertEqual('base\nleft\nright\nmerged', delta_data)
-        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
-            True)], logged_vf.calls)
+        ft_data, delta_data = self.helpGetBytes(
+            f, 'fulltext', _mod_knit.FTPlainToFullText(None),
+            'fulltext', _mod_knit.DeltaPlainToFullText(logged_vf))
+        self.assertEqual(b'origin', ft_data)
+        self.assertEqual(b'base\nleft\nright\nmerged', delta_data)
+        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
+                           True)], logged_vf.calls)


 class TestKeyMapper(TestCaseWithMemoryTransport):
...

     def test_identity_mapper(self):
         mapper = versionedfile.ConstantMapper("inventory")
-        self.assertEqual("inventory", mapper.map(('foo@ar',)))
-        self.assertEqual("inventory", mapper.map(('quux',)))
+        self.assertEqual("inventory", mapper.map((b'foo@ar',)))
+        self.assertEqual("inventory", mapper.map((b'quux',)))

     def test_prefix_mapper(self):
         #format5: plain
         mapper = versionedfile.PrefixMapper()
-        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
-        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
-        self.assertEqual(('file-id',), mapper.unmap("file-id"))
-        self.assertEqual(('new-id',), mapper.unmap("new-id"))
+        self.assertEqual("file-id", mapper.map((b"file-id", b"revision-id")))
+        self.assertEqual("new-id", mapper.map((b"new-id", b"revision-id")))
+        self.assertEqual((b'file-id',), mapper.unmap("file-id"))
+        self.assertEqual((b'new-id',), mapper.unmap("new-id"))

     def test_hash_prefix_mapper(self):
         #format6: hash + plain
         mapper = versionedfile.HashPrefixMapper()
-        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
-        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
-        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
-        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))
+        self.assertEqual(
+            "9b/file-id", mapper.map((b"file-id", b"revision-id")))
+        self.assertEqual("45/new-id", mapper.map((b"new-id", b"revision-id")))
+        self.assertEqual((b'file-id',), mapper.unmap("9b/file-id"))
+        self.assertEqual((b'new-id',), mapper.unmap("45/new-id"))

     def test_hash_escaped_mapper(self):
         #knit1: hash + escaped
         mapper = versionedfile.HashEscapedPrefixMapper()
-        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
-        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
-            "revision-id")))
-        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
-            "revision-id")))
-        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
-        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
+        self.assertEqual("88/%2520", mapper.map((b" ", b"revision-id")))
+        self.assertEqual("ed/fil%2545-%2549d", mapper.map((b"filE-Id",
+                                                           b"revision-id")))
+        self.assertEqual("88/ne%2557-%2549d", mapper.map((b"neW-Id",
+                                                          b"revision-id")))
+        self.assertEqual((b'filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
+        self.assertEqual((b'neW-Id',), mapper.unmap("88/ne%2557-%2549d"))

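The mappers turn tuple keys into transport-relative paths and back; after this change the key elements are bytes while the returned paths stay native strings. A round trip with the same values the tests use (PrefixMapper keys on the first tuple element only):

    from breezy.bzr.versionedfile import PrefixMapper

    mapper = PrefixMapper()
    path = mapper.map((b"file-id", b"revision-id"))
    assert path == "file-id"
    assert mapper.unmap(path) == (b"file-id",)
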
 class TestVersionedFiles(TestCaseWithMemoryTransport):
     """Tests for the multiple-file variant of VersionedFile."""

+    # We want to be sure of behaviour for:
+    # weaves prefix layout (weave texts)
+    # individually named weaves (weave inventories)
+    # annotated knits - prefix|hash|hash-escape layout, we test the third only
+    #                   as it is the most complex mapper.
+    # individually named knits
+    # individual no-graph knits in packs (signatures)
+    # individual graph knits in packs (inventories)
+    # individual graph nocompression knits in packs (revisions)
+    # plain text knits in packs (texts)
+    len_one_scenarios = [
+        ('weave-named', {
+            'cleanup': None,
+            'factory': make_versioned_files_factory(WeaveFile,
+                                                    ConstantMapper('inventory')),
+            'graph': True,
+            'key_length': 1,
+            'support_partial_insertion': False,
+            }),
+        ('named-knit', {
+            'cleanup': None,
+            'factory': make_file_factory(False, ConstantMapper('revisions')),
+            'graph': True,
+            'key_length': 1,
+            'support_partial_insertion': False,
+            }),
+        ('named-nograph-nodelta-knit-pack', {
+            'cleanup': cleanup_pack_knit,
+            'factory': make_pack_factory(False, False, 1),
+            'graph': False,
+            'key_length': 1,
+            'support_partial_insertion': False,
+            }),
+        ('named-graph-knit-pack', {
+            'cleanup': cleanup_pack_knit,
+            'factory': make_pack_factory(True, True, 1),
+            'graph': True,
+            'key_length': 1,
+            'support_partial_insertion': True,
+            }),
+        ('named-graph-nodelta-knit-pack', {
+            'cleanup': cleanup_pack_knit,
+            'factory': make_pack_factory(True, False, 1),
+            'graph': True,
+            'key_length': 1,
+            'support_partial_insertion': False,
+            }),
+        ('groupcompress-nograph', {
+            'cleanup': groupcompress.cleanup_pack_group,
+            'factory': groupcompress.make_pack_factory(False, False, 1),
+            'graph': False,
+            'key_length': 1,
+            'support_partial_insertion': False,
+            }),
+        ]
+    len_two_scenarios = [
+        ('weave-prefix', {
+            'cleanup': None,
+            'factory': make_versioned_files_factory(WeaveFile,
+                                                    PrefixMapper()),
+            'graph': True,
+            'key_length': 2,
+            'support_partial_insertion': False,
+            }),
+        ('annotated-knit-escape', {
+            'cleanup': None,
+            'factory': make_file_factory(True, HashEscapedPrefixMapper()),
+            'graph': True,
+            'key_length': 2,
+            'support_partial_insertion': False,
+            }),
+        ('plain-knit-pack', {
+            'cleanup': cleanup_pack_knit,
+            'factory': make_pack_factory(True, True, 2),
+            'graph': True,
+            'key_length': 2,
+            'support_partial_insertion': True,
+            }),
+        ('groupcompress', {
+            'cleanup': groupcompress.cleanup_pack_group,
+            'factory': groupcompress.make_pack_factory(True, False, 1),
+            'graph': True,
+            'key_length': 1,
+            'support_partial_insertion': False,
+            }),
+        ]
+
+    scenarios = len_one_scenarios + len_two_scenarios
+
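The class-level scenarios attribute replaces the old module-level load_tests() splitting: each (name, dict) pair multiplies the test class, with the dict entries becoming attributes on the test instance (self.factory, self.graph, self.key_length, self.cleanup, self.support_partial_insertion). Schematically, assuming a testscenarios-style runner applies them roughly like this:

    # Illustrative sketch of scenario application, not breezy's actual runner.
    def apply_scenario(test_instance, scenario):
        name, attrs = scenario
        for attr, value in attrs.items():
            setattr(test_instance, attr, value)
        return test_instance
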
     def get_versionedfiles(self, relpath='files'):
         transport = self.get_transport(relpath)
         if relpath != '.':
...
         if self.key_length == 1:
             return (suffix,)
         else:
-            return ('FileA',) + (suffix,)
+            return (b'FileA',) + (suffix,)
+
+    def test_add_fallback_implies_without_fallbacks(self):
+        f = self.get_versionedfiles('files')
+        if getattr(f, 'add_fallback_versioned_files', None) is None:
+            raise TestNotApplicable("%s doesn't support fallbacks"
+                                    % (f.__class__.__name__,))
+        g = self.get_versionedfiles('fallback')
+        key_a = self.get_simple_key(b'a')
+        g.add_lines(key_a, [], [b'\n'])
+        f.add_fallback_versioned_files(g)
+        self.assertTrue(key_a in f.get_parent_map([key_a]))
+        self.assertFalse(
+            key_a in f.without_fallbacks().get_parent_map([key_a]))

     def test_add_lines(self):
         f = self.get_versionedfiles()
-        key0 = self.get_simple_key('r0')
-        key1 = self.get_simple_key('r1')
-        key2 = self.get_simple_key('r2')
-        keyf = self.get_simple_key('foo')
-        f.add_lines(key0, [], ['a\n', 'b\n'])
+        key0 = self.get_simple_key(b'r0')
+        key1 = self.get_simple_key(b'r1')
+        key2 = self.get_simple_key(b'r2')
+        keyf = self.get_simple_key(b'foo')
+        f.add_lines(key0, [], [b'a\n', b'b\n'])
         if self.graph:
-            f.add_lines(key1, [key0], ['b\n', 'c\n'])
+            f.add_lines(key1, [key0], [b'b\n', b'c\n'])
         else:
-            f.add_lines(key1, [], ['b\n', 'c\n'])
+            f.add_lines(key1, [], [b'b\n', b'c\n'])
         keys = f.keys()
         self.assertTrue(key0 in keys)
         self.assertTrue(key1 in keys)
...
         for record in f.get_record_stream([key0, key1], 'unordered', True):
             records.append((record.key, record.get_bytes_as('fulltext')))
         records.sort()
-        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)
+        self.assertEqual([(key0, b'a\nb\n'), (key1, b'b\nc\n')], records)

-    def test__add_text(self):
+    def test_add_chunks(self):
         f = self.get_versionedfiles()
-        key0 = self.get_simple_key('r0')
-        key1 = self.get_simple_key('r1')
-        key2 = self.get_simple_key('r2')
-        keyf = self.get_simple_key('foo')
-        f._add_text(key0, [], 'a\nb\n')
+        key0 = self.get_simple_key(b'r0')
+        key1 = self.get_simple_key(b'r1')
+        key2 = self.get_simple_key(b'r2')
+        keyf = self.get_simple_key(b'foo')
+
+        def add_chunks(key, parents, chunks):
+            factory = ChunkedContentFactory(
+                key, parents, osutils.sha_strings(chunks), chunks)
+            return f.add_content(factory)
+
+        add_chunks(key0, [], [b'a', b'\nb\n'])
         if self.graph:
-            f._add_text(key1, [key0], 'b\nc\n')
+            add_chunks(key1, [key0], [b'b', b'\n', b'c\n'])
         else:
-            f._add_text(key1, [], 'b\nc\n')
+            add_chunks(key1, [], [b'b\n', b'c\n'])
         keys = f.keys()
-        self.assertTrue(key0 in keys)
-        self.assertTrue(key1 in keys)
+        self.assertIn(key0, keys)
+        self.assertIn(key1, keys)
         records = []
         for record in f.get_record_stream([key0, key1], 'unordered', True):
             records.append((record.key, record.get_bytes_as('fulltext')))
         records.sort()
-        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)
+        self.assertEqual([(key0, b'a\nb\n'), (key1, b'b\nc\n')], records)

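test__add_text becomes test_add_chunks: the private _add_text() API is replaced by add_content() taking a content factory, and the local add_chunks helper above wraps the chunks in a ChunkedContentFactory. Its sha1 argument comes from osutils.sha_strings(), i.e. a digest over the concatenated chunks, so how the text is split into chunks cannot affect identity. With the stdlib, and assuming sha_strings() hashes the joined bytes (as its use here suggests):

    import hashlib

    def sha_of(chunks):
        return hashlib.sha1(b''.join(chunks)).hexdigest()

    assert sha_of([b'a', b'\nb\n']) == sha_of([b'a\nb\n'])
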
     def test_annotate(self):
         files = self.get_versionedfiles()
...
         if self.key_length == 1:
             prefix = ()
         else:
-            prefix = ('FileA',)
+            prefix = (b'FileA',)
         # introduced full text
-        origins = files.annotate(prefix + ('origin',))
+        origins = files.annotate(prefix + (b'origin',))
         self.assertEqual([
-            (prefix + ('origin',), 'origin\n')],
+            (prefix + (b'origin',), b'origin\n')],
             origins)
         # a delta
-        origins = files.annotate(prefix + ('base',))
+        origins = files.annotate(prefix + (b'base',))
         self.assertEqual([
-            (prefix + ('base',), 'base\n')],
+            (prefix + (b'base',), b'base\n')],
             origins)
         # a merge
-        origins = files.annotate(prefix + ('merged',))
+        origins = files.annotate(prefix + (b'merged',))
         if self.graph:
             self.assertEqual([
-                (prefix + ('base',), 'base\n'),
-                (prefix + ('left',), 'left\n'),
-                (prefix + ('right',), 'right\n'),
-                (prefix + ('merged',), 'merged\n')
+                (prefix + (b'base',), b'base\n'),
+                (prefix + (b'left',), b'left\n'),
+                (prefix + (b'right',), b'right\n'),
+                (prefix + (b'merged',), b'merged\n')
                 ],
                 origins)
         else:
             # Without a graph everything is new.
             self.assertEqual([
-                (prefix + ('merged',), 'base\n'),
-                (prefix + ('merged',), 'left\n'),
-                (prefix + ('merged',), 'right\n'),
-                (prefix + ('merged',), 'merged\n')
+                (prefix + (b'merged',), b'base\n'),
+                (prefix + (b'merged',), b'left\n'),
+                (prefix + (b'merged',), b'right\n'),
+                (prefix + (b'merged',), b'merged\n')
                 ],
                 origins)
         self.assertRaises(RevisionNotPresent,
-            files.annotate, prefix + ('missing-key',))
+                          files.annotate, prefix + ('missing-key',))

     def test_check_no_parameters(self):
         files = self.get_versionedfiles()
...
         seen = set()
         # Texts output should be fulltexts.
         self.capture_stream(files, entries, seen.add,
-            files.get_parent_map(keys), require_fulltext=True)
+                            files.get_parent_map(keys), require_fulltext=True)
         # All texts should be output.
         self.assertEqual(set(keys), seen)

...
         files = self.get_versionedfiles()

     def get_diamond_files(self, files, trailing_eol=True, left_only=False,
-        nokeys=False):
+                          nokeys=False):
         return get_diamond_files(files, self.key_length,
-            trailing_eol=trailing_eol, nograph=not self.graph,
-            left_only=left_only, nokeys=nokeys)
+                                 trailing_eol=trailing_eol, nograph=not self.graph,
+                                 left_only=left_only, nokeys=nokeys)

     def _add_content_nostoresha(self, add_lines):
         """When nostore_sha is supplied using old content raises."""
         vf = self.get_versionedfiles()
-        empty_text = ('a', [])
-        sample_text_nl = ('b', ["foo\n", "bar\n"])
-        sample_text_no_nl = ('c', ["foo\n", "bar"])
+        empty_text = (b'a', [])
+        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
+        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
         shas = []
         for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
             if add_lines:
                 sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
                                          lines)
             else:
-                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
-                                         ''.join(lines))
+                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
+                                         lines)
             shas.append(sha)
         # we now have a copy of all the lines in the vf.
         for sha, (version, lines) in zip(
-            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
-            new_key = self.get_simple_key(version + "2")
-            self.assertRaises(errors.ExistingContent,
-                vf.add_lines, new_key, [], lines,
-                nostore_sha=sha)
-            self.assertRaises(errors.ExistingContent,
-                vf._add_text, new_key, [], ''.join(lines),
-                nostore_sha=sha)
+                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
+            new_key = self.get_simple_key(version + b"2")
+            self.assertRaises(errors.ExistingContent,
+                              vf.add_lines, new_key, [], lines,
+                              nostore_sha=sha)
+            self.assertRaises(errors.ExistingContent,
+                              vf.add_lines, new_key, [], lines,
+                              nostore_sha=sha)
             # and no new version should have been added.
-            record = vf.get_record_stream([new_key], 'unordered', True).next()
+            record = next(vf.get_record_stream([new_key], 'unordered', True))
             self.assertEqual('absent', record.storage_kind)

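The nostore_sha parameter is a dedup guard: if the lines being added already hash to the supplied value, the store refuses with ExistingContent rather than writing a new copy, and the loop above checks that no version gets added. The calling pattern, as a hedged sketch using names from the surrounding test:

    # vf, key, new_key and lines come from the test context above.
    sha, _, _ = vf.add_lines(key, [], lines)
    try:
        vf.add_lines(new_key, [], lines, nostore_sha=sha)
    except errors.ExistingContent:
        pass  # expected: identical content was already stored
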
1626
1674
    def test_add_lines_nostoresha(self):
1627
1675
        self._add_content_nostoresha(add_lines=True)
1628
1676
 
1629
 
    def test__add_text_nostoresha(self):
1630
 
        self._add_content_nostoresha(add_lines=False)
1631
 
 
1632
1677
    def test_add_lines_return(self):
1633
1678
        files = self.get_versionedfiles()
1634
1679
        # save code by using the stock data insertion helper.
1640
1685
            results.append(add[:2])
1641
1686
        if self.key_length == 1:
1642
1687
            self.assertEqual([
1643
 
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
1644
 
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1645
 
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1646
 
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1647
 
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1688
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1689
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1690
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1691
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1692
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1648
1693
                results)
1649
1694
        elif self.key_length == 2:
1650
1695
            self.assertEqual([
1651
 
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
1652
 
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
1653
 
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1654
 
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1655
 
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1656
 
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1657
 
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1658
 
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1659
 
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
1660
 
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1696
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1697
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1698
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1699
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1700
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1701
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1702
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1703
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1704
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
 
1705
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1661
1706
                results)
1662
1707
 
1663
1708
    def test_add_lines_no_key_generates_chk_key(self):
1671
1716
            results.append(add[:2])
1672
1717
        if self.key_length == 1:
1673
1718
            self.assertEqual([
1674
 
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
1675
 
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1676
 
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1677
 
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1678
 
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1719
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1720
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1721
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1722
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1723
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1679
1724
                results)
1680
1725
            # Check the added items got CHK keys.
1681
 
            self.assertEqual(set([
1682
 
                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
1683
 
                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
1684
 
                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
1685
 
                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
1686
 
                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
1687
 
                ]),
 
1726
            self.assertEqual({
 
1727
                (b'sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
 
1728
                (b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
 
1729
                (b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
 
1730
                (b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
 
1731
                (b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
 
1732
                },
1688
1733
                files.keys())
         elif self.key_length == 2:
             self.assertEqual([
-                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
-                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
-                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
-                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
-                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
-                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
-                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
-                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
-                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
-                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
+                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
+                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
+                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
+                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                 results)
             # Check the added items got CHK keys.
-            self.assertEqual(set([
-                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
-                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
-                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
-                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
-                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
-                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
-                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
-                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
-                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
-                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
-                ]),
+            self.assertEqual({
+                (b'FileA', b'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
+                (b'FileA', b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
+                (b'FileA', b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
+                (b'FileA', b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
+                (b'FileA', b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
+                (b'FileB', b'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
+                (b'FileB', b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
+                (b'FileB', b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
+                (b'FileB', b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
+                (b'FileB', b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
+                },
                 files.keys())
 
     def test_empty_lines(self):
         """Empty files can be stored."""
         f = self.get_versionedfiles()
-        key_a = self.get_simple_key('a')
+        key_a = self.get_simple_key(b'a')
         f.add_lines(key_a, [], [])
-        self.assertEqual('',
-            f.get_record_stream([key_a], 'unordered', True
-                ).next().get_bytes_as('fulltext'))
-        key_b = self.get_simple_key('b')
+        self.assertEqual(b'',
+                         next(f.get_record_stream([key_a], 'unordered', True
+                                                  )).get_bytes_as('fulltext'))
+        key_b = self.get_simple_key(b'b')
         f.add_lines(key_b, self.get_parents([key_a]), [])
-        self.assertEqual('',
-            f.get_record_stream([key_b], 'unordered', True
-                ).next().get_bytes_as('fulltext'))
+        self.assertEqual(b'',
+                         next(f.get_record_stream([key_b], 'unordered', True
+                                                  )).get_bytes_as('fulltext'))
 
     def test_newline_only(self):
         f = self.get_versionedfiles()
-        key_a = self.get_simple_key('a')
-        f.add_lines(key_a, [], ['\n'])
-        self.assertEqual('\n',
-            f.get_record_stream([key_a], 'unordered', True
-                ).next().get_bytes_as('fulltext'))
-        key_b = self.get_simple_key('b')
-        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
-        self.assertEqual('\n',
-            f.get_record_stream([key_b], 'unordered', True
-                ).next().get_bytes_as('fulltext'))
+        key_a = self.get_simple_key(b'a')
+        f.add_lines(key_a, [], [b'\n'])
+        self.assertEqual(b'\n',
+                         next(f.get_record_stream([key_a], 'unordered', True
+                                                  )).get_bytes_as('fulltext'))
+        key_b = self.get_simple_key(b'b')
+        f.add_lines(key_b, self.get_parents([key_a]), [b'\n'])
+        self.assertEqual(b'\n',
+                         next(f.get_record_stream([key_b], 'unordered', True
+                                                  )).get_bytes_as('fulltext'))
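
Python 3 renamed the iterator method to __next__ and dropped .next(), so every stream.next() call in these tests becomes the next() builtin, which exists on both interpreters. A small sketch of the pattern, assuming only a generator of record bodies:

    def records():
        yield b''  # an empty fulltext, as stored by test_empty_lines above

    stream = records()
    assert next(stream) == b''  # Python 2 spelled this stream.next()
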
 
     def test_get_known_graph_ancestry(self):
         f = self.get_versionedfiles()
         if not self.graph:
             raise TestNotApplicable('ancestry info only relevant with graph.')
-        key_a = self.get_simple_key('a')
-        key_b = self.get_simple_key('b')
-        key_c = self.get_simple_key('c')
+        key_a = self.get_simple_key(b'a')
+        key_b = self.get_simple_key(b'b')
+        key_c = self.get_simple_key(b'c')
         # A
         # |\
         # | B
         # |/
         # C
-        f.add_lines(key_a, [], ['\n'])
-        f.add_lines(key_b, [key_a], ['\n'])
-        f.add_lines(key_c, [key_a, key_b], ['\n'])
+        f.add_lines(key_a, [], [b'\n'])
+        f.add_lines(key_b, [key_a], [b'\n'])
+        f.add_lines(key_c, [key_a, key_b], [b'\n'])
         kg = f.get_known_graph_ancestry([key_c])
         self.assertIsInstance(kg, _mod_graph.KnownGraph)
         self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
@@ -1767 +1812 @@
         if getattr(f, 'add_fallback_versioned_files', None) is None:
             raise TestNotApplicable("%s doesn't support fallbacks"
                                     % (f.__class__.__name__,))
-        key_a = self.get_simple_key('a')
-        key_b = self.get_simple_key('b')
-        key_c = self.get_simple_key('c')
+        key_a = self.get_simple_key(b'a')
+        key_b = self.get_simple_key(b'b')
+        key_c = self.get_simple_key(b'c')
         # A     only in fallback
         # |\
         # | B
         # |/
         # C
         g = self.get_versionedfiles('fallback')
-        g.add_lines(key_a, [], ['\n'])
+        g.add_lines(key_a, [], [b'\n'])
         f.add_fallback_versioned_files(g)
-        f.add_lines(key_b, [key_a], ['\n'])
-        f.add_lines(key_c, [key_a, key_b], ['\n'])
+        f.add_lines(key_b, [key_a], [b'\n'])
+        f.add_lines(key_c, [key_a, key_b], [b'\n'])
         kg = f.get_known_graph_ancestry([key_c])
         self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
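 
Both ancestry tests build the same diamond-shaped graph (A at the root, B derived from A, C merging both) and expect topo_sort() to yield every parent before its children. A self-contained sketch of that ordering, assuming an acyclic parent map and nothing about the KnownGraph internals:

    def topo_sort(parent_map):
        # Emit a key only once all of its parents have been emitted;
        # assumes the graph is acyclic, as in the tests above.
        done, order, pending = set(), [], sorted(parent_map)
        while pending:
            remaining = []
            for key in pending:
                if all(p in done for p in parent_map[key]):
                    done.add(key)
                    order.append(key)
                else:
                    remaining.append(key)
            pending = remaining
        return order

    graph = {(b'a',): [], (b'b',): [(b'a',)], (b'c',): [(b'a',), (b'b',)]}
    assert topo_sort(graph) == [(b'a',), (b'b',), (b'c',)]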
 
@@ -1792 +1837 @@
     def assertValidStorageKind(self, storage_kind):
         """Assert that storage_kind is a valid storage_kind."""
         self.assertSubset([storage_kind],
-            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
-             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
-             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
-             'knit-delta-gz',
-             'knit-delta-closure', 'knit-delta-closure-ref',
-             'groupcompress-block', 'groupcompress-block-ref'])
+                          ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
+                           'knit-ft', 'knit-delta', 'chunked', 'fulltext',
+                           'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
+                           'knit-delta-gz',
+                           'knit-delta-closure', 'knit-delta-closure-ref',
+                           'groupcompress-block', 'groupcompress-block-ref'])
 
     def capture_stream(self, f, entries, on_seen, parents,
-        require_fulltext=False):
+                       require_fulltext=False):
         """Capture a stream for testing."""
         for factory in entries:
             on_seen(factory.key)
             self.assertValidStorageKind(factory.storage_kind)
             if factory.sha1 is not None:
                 self.assertEqual(f.get_sha1s([factory.key])[factory.key],
-                    factory.sha1)
+                                 factory.sha1)
             self.assertEqual(parents[factory.key], factory.parents)
             self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
-                str)
+                                  bytes)
             if require_fulltext:
                 factory.get_bytes_as('fulltext')
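
The assertion change from str to bytes is the core of this port: on Python 2 bytes is simply an alias for str, so the old check passed by accident, while on Python 3 the two types are distinct and record payloads are genuinely binary. A two-line illustration:

    payload = b"my text\ncontent"
    assert isinstance(payload, bytes)                     # holds on Python 2 and 3
    assert not isinstance(payload, str) or bytes is str   # only Python 2 conflates the two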
 
@@ -1828 +1873 @@
     def get_keys_and_sort_order(self):
         """Get diamond test keys list, and their sort ordering."""
         if self.key_length == 1:
-            keys = [('merged',), ('left',), ('right',), ('base',)]
-            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
+            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
+            sort_order = {(b'merged',): 2, (b'left',): 1,
+                          (b'right',): 1, (b'base',): 0}
         else:
             keys = [
-                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
-                ('FileA', 'base'),
-                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
-                ('FileB', 'base'),
+                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
+                (b'FileA', b'base'),
+                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
+                (b'FileB', b'base'),
                 ]
             sort_order = {
-                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
-                ('FileA', 'base'):0,
-                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
-                ('FileB', 'base'):0,
+                (b'FileA', b'merged'): 2, (b'FileA', b'left'): 1, (b'FileA', b'right'): 1,
+                (b'FileA', b'base'): 0,
+                (b'FileB', b'merged'): 2, (b'FileB', b'left'): 1, (b'FileB', b'right'): 1,
+                (b'FileB', b'base'): 0,
                 }
         return keys, sort_order
 
     def get_keys_and_groupcompress_sort_order(self):
         """Get diamond test keys list, and their groupcompress sort ordering."""
         if self.key_length == 1:
-            keys = [('merged',), ('left',), ('right',), ('base',)]
-            sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
+            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
+            sort_order = {(b'merged',): 0, (b'left',): 1,
+                          (b'right',): 1, (b'base',): 2}
         else:
             keys = [
-                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
-                ('FileA', 'base'),
-                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
-                ('FileB', 'base'),
+                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
+                (b'FileA', b'base'),
+                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
+                (b'FileB', b'base'),
                 ]
             sort_order = {
-                ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
-                ('FileA', 'base'):2,
-                ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
-                ('FileB', 'base'):5,
+                (b'FileA', b'merged'): 0, (b'FileA', b'left'): 1, (b'FileA', b'right'): 1,
+                (b'FileA', b'base'): 2,
+                (b'FileB', b'merged'): 3, (b'FileB', b'left'): 4, (b'FileB', b'right'): 4,
+                (b'FileB', b'base'): 5,
                 }
         return keys, sort_order
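
Note how the two orderings differ: topological order places bases first ('base' at position 0, 'merged' last), while groupcompress order emits the newest text of each file's chain first and keeps each prefix's chain together, so related texts land near each other for compression. A tiny checker in the spirit of the assertStreamOrder helper used below, applied to a hypothetical stream:

    def check_order(seen, sort_order):
        # Positions may tie ('left' and 'right') but must never decrease.
        low = 0
        for key in seen:
            assert sort_order[key] >= low, 'out of order: %r' % (key,)
            low = sort_order[key]

    check_order([(b'merged',), (b'left',), (b'right',), (b'base',)],
                {(b'merged',): 0, (b'left',): 1, (b'right',): 1, (b'base',): 2})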
 
@@ -1888 +1935 @@
             seen.append(factory.key)
             self.assertValidStorageKind(factory.storage_kind)
             self.assertSubset([factory.sha1],
-                [None, files.get_sha1s([factory.key])[factory.key]])
+                              [None, files.get_sha1s([factory.key])[factory.key]])
             self.assertEqual(parent_map[factory.key], factory.parents)
             # self.assertEqual(files.get_text(factory.key),
             ft_bytes = factory.get_bytes_as('fulltext')
-            self.assertIsInstance(ft_bytes, str)
+            self.assertIsInstance(ft_bytes, bytes)
             chunked_bytes = factory.get_bytes_as('chunked')
-            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))
+            self.assertEqualDiff(ft_bytes, b''.join(chunked_bytes))
+            chunked_bytes = factory.iter_bytes_as('chunked')
+            self.assertEqualDiff(ft_bytes, b''.join(chunked_bytes))
 
         self.assertStreamOrder(sort_order, seen, keys)
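
The 'chunked' representation hands back an iterable of byte chunks whose concatenation must equal the 'fulltext' form, which is why the new assertions join with b'' and also exercise the newly added iter_bytes_as() accessor. The invariant, sketched with a hypothetical chunk list:

    chunks = [b'my text\n', b'content']   # hypothetical chunked record body
    fulltext = b''.join(chunks)           # how the assertions reassemble it
    assert fulltext == b'my text\ncontent'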
 
@@ -1912 +1961 @@
     def assertStreamOrder(self, sort_order, seen, keys):
         self.assertEqual(len(set(seen)), len(keys))
         if self.key_length == 1:
-            lows = {():0}
+            lows = {(): 0}
         else:
-            lows = {('FileA',):0, ('FileB',):0}
+            lows = {(b'FileA',): 0, (b'FileB',): 0}
         if not self.graph:
             self.assertEqual(set(keys), set(seen))
         else:
             for key in seen:
                 sort_pos = sort_order[key]
                 self.assertTrue(sort_pos >= lows[key[:-1]],
-                    "Out of order in sorted stream: %r, %r" % (key, seen))
+                                "Out of order in sorted stream: %r, %r" % (key, seen))
                 lows[key[:-1]] = sort_pos
 
     def test_get_record_stream_unknown_storage_kind_raises(self):
@@ -1929 +1978 @@
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [('merged',), ('left',), ('right',), ('base',)]
+            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
         else:
             keys = [
-                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
-                ('FileA', 'base'),
-                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
-                ('FileB', 'base'),
+                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
+                (b'FileA', b'base'),
+                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
+                (b'FileB', b'base'),
                 ]
         parent_map = files.get_parent_map(keys)
         entries = files.get_record_stream(keys, 'unordered', False)
@@ -1951 +2000 @@
             self.assertEqual(parent_map[factory.key], factory.parents)
             # currently no stream emits mpdiff
             self.assertRaises(errors.UnavailableRepresentation,
-                factory.get_bytes_as, 'mpdiff')
+                              factory.get_bytes_as, 'mpdiff')
             self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
-                str)
+                                  bytes)
         self.assertEqual(set(keys), seen)
 
     def test_get_record_stream_missing_records_are_absent(self):
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
+            keys = [(b'merged',), (b'left',), (b'right',),
+                    (b'absent',), (b'base',)]
         else:
             keys = [
-                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
-                ('FileA', 'absent'), ('FileA', 'base'),
-                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
-                ('FileB', 'absent'), ('FileB', 'base'),
-                ('absent', 'absent'),
+                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
+                (b'FileA', b'absent'), (b'FileA', b'base'),
+                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
+                (b'FileB', b'absent'), (b'FileB', b'base'),
+                (b'absent', b'absent'),
                 ]
         parent_map = files.get_parent_map(keys)
         entries = files.get_record_stream(keys, 'unordered', False)
@@ -1978 +2028 @@
     def assertRecordHasContent(self, record, bytes):
         """Assert that record has the bytes bytes."""
         self.assertEqual(bytes, record.get_bytes_as('fulltext'))
-        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))
+        self.assertEqual(bytes, b''.join(record.get_bytes_as('chunked')))
 
     def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
         files = self.get_versionedfiles()
-        key = self.get_simple_key('foo')
-        files.add_lines(key, (), ['my text\n', 'content'])
+        key = self.get_simple_key(b'foo')
+        files.add_lines(key, (), [b'my text\n', b'content'])
         stream = files.get_record_stream([key], 'unordered', False)
-        record = stream.next()
+        record = next(stream)
         if record.storage_kind in ('chunked', 'fulltext'):
             # chunked and fulltext representations are for direct use not wire
             # serialisation: check they are able to be used directly. To send
             # such records over the wire translation will be needed.
-            self.assertRecordHasContent(record, "my text\ncontent")
+            self.assertRecordHasContent(record, b"my text\ncontent")
         else:
             bytes = [record.get_bytes_as(record.storage_kind)]
             network_stream = versionedfile.NetworkRecordStream(bytes).read()
@@ -1999 +2049 @@
             for record in network_stream:
                 records.append(record)
                 self.assertEqual(source_record.storage_kind,
-                    record.storage_kind)
+                                 record.storage_kind)
                 self.assertEqual(source_record.parents, record.parents)
                 self.assertEqual(
                     source_record.get_bytes_as(source_record.storage_kind),
@@ -2012 +2062 @@
         :param records: A list to collect the seen records.
         :return: A generator of the records in stream.
         """
-        # We make assertions during copying to catch things early for
-        # easier debugging.
-        for record, ref_record in izip(stream, expected):
+        # We make assertions during copying to catch things early for easier
+        # debugging. This must use the iterating zip() from the future.
+        for record, ref_record in zip(stream, expected):
             records.append(record)
             self.assertEqual(ref_record.key, record.key)
             self.assertEqual(ref_record.storage_kind, record.storage_kind)
@@ -2022 +2072 @@
             yield record
 
     def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
-        stream):
+                                        stream):
         """Convert a stream to a bytes iterator.
 
         :param skipped_records: A list with one element to increment when a
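
itertools.izip is gone in Python 3, where the builtin zip() is already lazy; the zip shim imported at the top of this module resolves to the iterating version on both interpreters. Laziness matters here because both arguments are generators that must be consumed in lockstep, one record from each per iteration. A sketch:

    def numbered(prefix, n):
        for i in range(n):
            yield '%s-%d' % (prefix, i)

    # Pairs the two generators lazily -- the behaviour izip() gave on Python 2.
    for rec, ref in zip(numbered('stream', 3), numbered('expected', 3)):
        assert rec.split('-')[1] == ref.split('-')[1]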
@@ -2043 +2093 @@
     def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
         files = self.get_versionedfiles()
         target_files = self.get_versionedfiles('target')
-        key = self.get_simple_key('ft')
-        key_delta = self.get_simple_key('delta')
-        files.add_lines(key, (), ['my text\n', 'content'])
+        key = self.get_simple_key(b'ft')
+        key_delta = self.get_simple_key(b'delta')
+        files.add_lines(key, (), [b'my text\n', b'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+        files.add_lines(key_delta, delta_parents, [
+                        b'different\n', b'content\n'])
         local = files.get_record_stream([key, key_delta], 'unordered', False)
         ref = files.get_record_stream([key, key_delta], 'unordered', False)
         skipped_records = [0]
         full_texts = {
-            key: "my text\ncontent",
-            key_delta: "different\ncontent\n",
+            key: b"my text\ncontent",
+            key_delta: b"different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
@@ -2077 +2128 @@
         # copy a delta over the wire
         files = self.get_versionedfiles()
         target_files = self.get_versionedfiles('target')
-        key = self.get_simple_key('ft')
-        key_delta = self.get_simple_key('delta')
-        files.add_lines(key, (), ['my text\n', 'content'])
+        key = self.get_simple_key(b'ft')
+        key_delta = self.get_simple_key(b'delta')
+        files.add_lines(key, (), [b'my text\n', b'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+        files.add_lines(key_delta, delta_parents, [
+                        b'different\n', b'content\n'])
         # Copy the basis text across so we can reconstruct the delta during
         # insertion into target.
         target_files.insert_record_stream(files.get_record_stream([key],
-            'unordered', False))
+                                                                  'unordered', False))
         local = files.get_record_stream([key_delta], 'unordered', False)
         ref = files.get_record_stream([key_delta], 'unordered', False)
         skipped_records = [0]
         full_texts = {
-            key_delta: "different\ncontent\n",
+            key_delta: b"different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
@@ -2113 +2165 @@
     def test_get_record_stream_wire_ready_delta_closure_included(self):
         # copy a delta over the wire with the ability to get its full text.
         files = self.get_versionedfiles()
-        key = self.get_simple_key('ft')
-        key_delta = self.get_simple_key('delta')
-        files.add_lines(key, (), ['my text\n', 'content'])
+        key = self.get_simple_key(b'ft')
+        key_delta = self.get_simple_key(b'delta')
+        files.add_lines(key, (), [b'my text\n', b'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
+        files.add_lines(key_delta, delta_parents, [
+                        b'different\n', b'content\n'])
         local = files.get_record_stream([key_delta], 'unordered', True)
         ref = files.get_record_stream([key_delta], 'unordered', True)
         skipped_records = [0]
         full_texts = {
-            key_delta: "different\ncontent\n",
+            key_delta: b"different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
@@ -2145 +2198 @@
         seen = set()
         for factory in entries:
             seen.add(factory.key)
-            if factory.key[-1] == 'absent':
+            if factory.key[-1] == b'absent':
                 self.assertEqual('absent', factory.storage_kind)
                 self.assertEqual(None, factory.sha1)
                 self.assertEqual(None, factory.parents)
@@ -2156 +2209 @@
                     self.assertEqual(sha1, factory.sha1)
                 self.assertEqual(parents[factory.key], factory.parents)
                 self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
-                    str)
+                                      bytes)
         self.assertEqual(set(keys), seen)
 
     def test_filter_absent_records(self):
@@ -2170 +2223 @@
         # absent keys is still delivered).
         present_keys = list(keys)
         if self.key_length == 1:
-            keys.insert(2, ('extra',))
+            keys.insert(2, (b'extra',))
         else:
-            keys.insert(2, ('extra', 'extra'))
+            keys.insert(2, (b'extra', b'extra'))
         entries = files.get_record_stream(keys, 'unordered', False)
         seen = set()
         self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
-            parent_map)
+                            parent_map)
         self.assertEqual(set(present_keys), seen)
 
     def get_mapper(self):
@@ -2196 +2249 @@
     def test_get_annotator(self):
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
-        origin_key = self.get_simple_key('origin')
-        base_key = self.get_simple_key('base')
-        left_key = self.get_simple_key('left')
-        right_key = self.get_simple_key('right')
-        merged_key = self.get_simple_key('merged')
+        origin_key = self.get_simple_key(b'origin')
+        base_key = self.get_simple_key(b'base')
+        left_key = self.get_simple_key(b'left')
+        right_key = self.get_simple_key(b'right')
+        merged_key = self.get_simple_key(b'merged')
         # annotator = files.get_annotator()
         # introduced full text
         origins, lines = files.get_annotator().annotate(origin_key)
         self.assertEqual([(origin_key,)], origins)
-        self.assertEqual(['origin\n'], lines)
+        self.assertEqual([b'origin\n'], lines)
         # a delta
         origins, lines = files.get_annotator().annotate(base_key)
         self.assertEqual([(base_key,)], origins)
@@ -2227 +2280 @@
                 (merged_key,),
                 ], origins)
         self.assertRaises(RevisionNotPresent,
-            files.get_annotator().annotate, self.get_simple_key('missing-key'))
+                          files.get_annotator().annotate, self.get_simple_key(b'missing-key'))
 
     def test_get_parent_map(self):
         files = self.get_versionedfiles()
         if self.key_length == 1:
             parent_details = [
-                (('r0',), self.get_parents(())),
-                (('r1',), self.get_parents((('r0',),))),
-                (('r2',), self.get_parents(())),
-                (('r3',), self.get_parents(())),
-                (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
+                ((b'r0',), self.get_parents(())),
+                ((b'r1',), self.get_parents(((b'r0',),))),
+                ((b'r2',), self.get_parents(())),
+                ((b'r3',), self.get_parents(())),
+                ((b'm',), self.get_parents(((b'r0',), (b'r1',), (b'r2',), (b'r3',)))),
                 ]
         else:
             parent_details = [
-                (('FileA', 'r0'), self.get_parents(())),
-                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
-                (('FileA', 'r2'), self.get_parents(())),
-                (('FileA', 'r3'), self.get_parents(())),
-                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
-                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
+                ((b'FileA', b'r0'), self.get_parents(())),
+                ((b'FileA', b'r1'), self.get_parents(((b'FileA', b'r0'),))),
+                ((b'FileA', b'r2'), self.get_parents(())),
+                ((b'FileA', b'r3'), self.get_parents(())),
+                ((b'FileA', b'm'), self.get_parents(((b'FileA', b'r0'),
+                                                     (b'FileA', b'r1'), (b'FileA', b'r2'), (b'FileA', b'r3')))),
                 ]
         for key, parents in parent_details:
             files.add_lines(key, parents, [])
             # immediately after adding it should be queryable.
-            self.assertEqual({key:parents}, files.get_parent_map([key]))
+            self.assertEqual({key: parents}, files.get_parent_map([key]))
         # We can ask for an empty set
         self.assertEqual({}, files.get_parent_map([]))
         # We can ask for many keys
         all_parents = dict(parent_details)
         self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
         # Absent keys are just not included in the result.
-        keys = all_parents.keys()
+        keys = list(all_parents.keys())
         if self.key_length == 1:
-            keys.insert(1, ('missing',))
+            keys.insert(1, (b'missing',))
         else:
-            keys.insert(1, ('missing', 'missing'))
+            keys.insert(1, (b'missing', b'missing'))
         # Absent keys are just ignored
         self.assertEqual(all_parents, files.get_parent_map(keys))
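
In Python 3, dict.keys() returns a view rather than a list, so the test now copies it with list() before calling insert(); a view supports iteration and membership tests but not mutation. Sketch:

    parents = {(b'r0',): (), (b'r1',): ((b'r0',),)}
    keys = list(parents.keys())   # views have no insert(); copy to a list
    keys.insert(1, (b'missing',))
    assert (b'missing',) in keys and (b'missing',) not in parents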
 
@@ -2270 +2323 @@
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
+            keys = [(b'base',), (b'origin',), (b'left',),
+                    (b'merged',), (b'right',)]
         else:
             # ask for shas from different prefixes.
             keys = [
-                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
-                ('FileA', 'merged'), ('FileB', 'right'),
+                (b'FileA', b'base'), (b'FileB', b'origin'), (b'FileA', b'left'),
+                (b'FileA', b'merged'), (b'FileB', b'right'),
                 ]
         self.assertEqual({
-            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
-            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
-            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
-            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
-            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
+            keys[0]: b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
+            keys[1]: b'00e364d235126be43292ab09cb4686cf703ddc17',
+            keys[2]: b'a8478686da38e370e32e42e8a0c220e33ee9132f',
+            keys[3]: b'ed8bce375198ea62444dc71952b22cfc2b09226d',
+            keys[4]: b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
             },
             files.get_sha1s(keys))
 
@@ -2296 +2350 @@
         self.assertEqual(set(actual.keys()), set(expected.keys()))
         actual_parents = actual.get_parent_map(actual.keys())
         if self.graph:
-            self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
+            self.assertEqual(
+                actual_parents, expected.get_parent_map(expected.keys()))
         else:
             for key, parents in actual_parents.items():
                 self.assertEqual(None, parents)
         for key in actual.keys():
-            actual_text = actual.get_record_stream(
-                [key], 'unordered', True).next().get_bytes_as('fulltext')
-            expected_text = expected.get_record_stream(
-                [key], 'unordered', True).next().get_bytes_as('fulltext')
+            actual_text = next(actual.get_record_stream(
+                [key], 'unordered', True)).get_bytes_as('fulltext')
+            expected_text = next(expected.get_record_stream(
+                [key], 'unordered', True)).get_bytes_as('fulltext')
             self.assertEqual(actual_text, expected_text)
 
     def test_insert_record_stream_fulltexts(self):
@@ -2318 +2373 @@
             source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
 
@@ -2333 +2388 @@
             source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
 
@@ -2346 +2401 @@
         source = make_file_factory(True, mapper)(source_transport)
         self.get_diamond_files(source)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
 
@@ -2359 +2414 @@
         source = make_file_factory(True, mapper)(source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
 
@@ -2372 +2427 @@
         source = make_file_factory(False, mapper)(source_transport)
         self.get_diamond_files(source)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
 
@@ -2385 +2440 @@
         source = make_file_factory(False, mapper)(source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
 
@@ -2397 +2452 @@
         # insert some keys into f.
         self.get_diamond_files(files, left_only=True)
         stream = source.get_record_stream(source.keys(), 'topological',
-            False)
+                                          False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)
 
@@ -2405 +2460 @@
         """Inserting a stream with absent keys should raise an error."""
         files = self.get_versionedfiles()
         source = self.get_versionedfiles('source')
-        stream = source.get_record_stream([('missing',) * self.key_length],
-            'topological', False)
+        stream = source.get_record_stream([(b'missing',) * self.key_length],
+                                          'topological', False)
         self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
-            stream)
+                          stream)
 
     def test_insert_record_stream_out_of_order(self):
         """An out of order stream can either error or work."""
@@ -2416 +2471 @@
         source = self.get_versionedfiles('source')
         self.get_diamond_files(source)
         if self.key_length == 1:
-            origin_keys = [('origin',)]
-            end_keys = [('merged',), ('left',)]
-            start_keys = [('right',), ('base',)]
+            origin_keys = [(b'origin',)]
+            end_keys = [(b'merged',), (b'left',)]
+            start_keys = [(b'right',), (b'base',)]
         else:
-            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
-            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
-                ('FileB', 'merged',), ('FileB', 'left',)]
-            start_keys = [('FileA', 'right',), ('FileA', 'base',),
-                ('FileB', 'right',), ('FileB', 'base',)]
-        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
+            origin_keys = [(b'FileA', b'origin'), (b'FileB', b'origin')]
+            end_keys = [(b'FileA', b'merged',), (b'FileA', b'left',),
+                        (b'FileB', b'merged',), (b'FileB', b'left',)]
+            start_keys = [(b'FileA', b'right',), (b'FileA', b'base',),
+                          (b'FileB', b'right',), (b'FileB', b'base',)]
+        origin_entries = source.get_record_stream(
+            origin_keys, 'unordered', False)
         end_entries = source.get_record_stream(end_keys, 'topological', False)
-        start_entries = source.get_record_stream(start_keys, 'topological', False)
-        entries = chain(origin_entries, end_entries, start_entries)
+        start_entries = source.get_record_stream(
+            start_keys, 'topological', False)
+        entries = itertools.chain(origin_entries, end_entries, start_entries)
         try:
             files.insert_record_stream(entries)
         except RevisionNotPresent:
@@ -2446 +2503 @@
         source = self.get_versionedfiles('source')
         parents = ()
         keys = []
-        content = [('same same %d\n' % n) for n in range(500)]
-        for letter in 'abcdefghijklmnopqrstuvwxyz':
-            key = ('key-' + letter,)
+        content = [(b'same same %d\n' % n) for n in range(500)]
+        letters = b'abcdefghijklmnopqrstuvwxyz'
+        for i in range(len(letters)):
+            letter = letters[i:i + 1]
+            key = (b'key-' + letter,)
             if self.key_length == 2:
-                key = ('prefix',) + key
-            content.append('content for ' + letter + '\n')
+                key = (b'prefix',) + key
+            content.append(b'content for ' + letter + b'\n')
             source.add_lines(key, parents, content)
             keys.append(key)
             parents = (key,)
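
Indexing a bytes string yields an int under Python 3 (iterating one likewise yields ints), which is why the loop is rewritten to slice letters[i:i + 1]: slicing returns a one-byte bytes object on both interpreter lines. For example:

    letters = b'abc'
    assert letters[0] == 97       # indexing bytes gives an int on Python 3
    assert letters[0:1] == b'a'   # slicing always gives bytes
    assert [letters[i:i + 1] for i in range(len(letters))] == [b'a', b'b', b'c']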
@@ -2460 +2519 @@
         streams = []
         for key in reversed(keys):
             streams.append(source.get_record_stream([key], 'unordered', False))
-        deltas = chain(*streams[:-1])
+        deltas = itertools.chain.from_iterable(streams[:-1])
         files = self.get_versionedfiles()
         try:
             files.insert_record_stream(deltas)
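
itertools.chain(*streams[:-1]) and itertools.chain.from_iterable(streams[:-1]) yield the same records; from_iterable avoids unpacking the list into call arguments and would remain lazy even if the source of streams were itself a generator. Sketch:

    import itertools

    streams = [iter([1, 2]), iter([3]), iter([4, 5])]
    flat = itertools.chain.from_iterable(streams[:-1])
    assert list(flat) == [1, 2, 3]   # everything except the last stream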
2483
2542
        source_transport.mkdir('.')
2484
2543
        source = make_file_factory(False, mapper)(source_transport)
2485
2544
        get_diamond_files(source, self.key_length, trailing_eol=True,
2486
 
            nograph=False, left_only=False)
 
2545
                          nograph=False, left_only=False)
2487
2546
        return source
2488
2547
 
2489
2548
    def test_insert_record_stream_delta_missing_basis_no_corruption(self):
2492
2551
        not added.
2493
2552
        """
2494
2553
        source = self.get_knit_delta_source()
2495
 
        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
 
2554
        keys = [self.get_simple_key(b'origin'), self.get_simple_key(b'merged')]
2496
2555
        entries = source.get_record_stream(keys, 'unordered', False)
2497
2556
        files = self.get_versionedfiles()
2498
2557
        if self.support_partial_insertion:
2499
2558
            self.assertEqual([],
2500
 
                list(files.get_missing_compression_parent_keys()))
 
2559
                             list(files.get_missing_compression_parent_keys()))
2501
2560
            files.insert_record_stream(entries)
2502
2561
            missing_bases = files.get_missing_compression_parent_keys()
2503
 
            self.assertEqual(set([self.get_simple_key('left')]),
2504
 
                set(missing_bases))
 
2562
            self.assertEqual({self.get_simple_key(b'left')},
 
2563
                             set(missing_bases))
2505
2564
            self.assertEqual(set(keys), set(files.get_parent_map(keys)))
2506
2565
        else:
2507
2566
            self.assertRaises(
2519
2578
            raise TestNotApplicable(
2520
2579
                'versioned file scenario does not support partial insertion')
2521
2580
        source = self.get_knit_delta_source()
2522
 
        entries = source.get_record_stream([self.get_simple_key('origin'),
2523
 
            self.get_simple_key('merged')], 'unordered', False)
 
2581
        entries = source.get_record_stream([self.get_simple_key(b'origin'),
 
2582
                                            self.get_simple_key(b'merged')], 'unordered', False)
2524
2583
        files = self.get_versionedfiles()
2525
2584
        files.insert_record_stream(entries)
2526
2585
        missing_bases = files.get_missing_compression_parent_keys()
2527
 
        self.assertEqual(set([self.get_simple_key('left')]),
2528
 
            set(missing_bases))
 
2586
        self.assertEqual({self.get_simple_key(b'left')},
 
2587
                         set(missing_bases))
2529
2588
        # 'merged' is inserted (although a commit of a write group involving
2530
2589
        # this versionedfiles would fail).
2531
 
        merged_key = self.get_simple_key('merged')
 
2590
        merged_key = self.get_simple_key(b'merged')
2532
2591
        self.assertEqual(
2533
 
            [merged_key], files.get_parent_map([merged_key]).keys())
 
2592
            [merged_key], list(files.get_parent_map([merged_key]).keys()))
2534
2593
        # Add the full delta closure of the missing records
2535
2594
        missing_entries = source.get_record_stream(
2536
2595
            missing_bases, 'unordered', True)
2538
2597
        # Now 'merged' is fully inserted (and a commit would succeed).
2539
2598
        self.assertEqual([], list(files.get_missing_compression_parent_keys()))
2540
2599
        self.assertEqual(
2541
 
            [merged_key], files.get_parent_map([merged_key]).keys())
 
2600
            [merged_key], list(files.get_parent_map([merged_key]).keys()))
2542
2601
        files.check()
2543
2602
 
2544
2603
    def test_iter_lines_added_or_present_in_keys(self):
2558
2617
 
2559
2618
        files = self.get_versionedfiles()
2560
2619
        # add a base to get included
2561
 
        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
 
2620
        files.add_lines(self.get_simple_key(b'base'), (), [b'base\n'])
2562
2621
        # add a ancestor to be included on one side
2563
 
        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
 
2622
        files.add_lines(self.get_simple_key(
 
2623
            b'lancestor'), (), [b'lancestor\n'])
2564
2624
        # add a ancestor to be included on the other side
2565
 
        files.add_lines(self.get_simple_key('rancestor'),
2566
 
            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
 
2625
        files.add_lines(self.get_simple_key(b'rancestor'),
 
2626
                        self.get_parents([self.get_simple_key(b'base')]), [b'rancestor\n'])
2567
2627
        # add a child of rancestor with no eofile-nl
2568
 
        files.add_lines(self.get_simple_key('child'),
2569
 
            self.get_parents([self.get_simple_key('rancestor')]),
2570
 
            ['base\n', 'child\n'])
 
2628
        files.add_lines(self.get_simple_key(b'child'),
 
2629
                        self.get_parents([self.get_simple_key(b'rancestor')]),
 
2630
                        [b'base\n', b'child\n'])
2571
2631
        # add a child of lancestor and base to join the two roots
2572
 
        files.add_lines(self.get_simple_key('otherchild'),
2573
 
            self.get_parents([self.get_simple_key('lancestor'),
2574
 
                self.get_simple_key('base')]),
2575
 
            ['base\n', 'lancestor\n', 'otherchild\n'])
 
2632
        files.add_lines(self.get_simple_key(b'otherchild'),
 
2633
                        self.get_parents([self.get_simple_key(b'lancestor'),
 
2634
                                          self.get_simple_key(b'base')]),
 
2635
                        [b'base\n', b'lancestor\n', b'otherchild\n'])
 
2636
 
2576
2637
        def iter_with_keys(keys, expected):
2577
2638
            # now we need to see what lines are returned, and how often.
2578
2639
            lines = {}
2579
2640
            progress = InstrumentedProgress()
2580
2641
            # iterate over the lines
2581
2642
            for line in files.iter_lines_added_or_present_in_keys(keys,
2582
 
                pb=progress):
 
2643
                                                                  pb=progress):
2583
2644
                lines.setdefault(line, 0)
2584
2645
                lines[line] += 1
2585
 
            if []!= progress.updates:
 
2646
            if [] != progress.updates:
2586
2647
                self.assertEqual(expected, progress.updates)
2587
2648
            return lines
2588
2649
        lines = iter_with_keys(
2589
 
            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
 
2650
            [self.get_simple_key(b'child'),
 
2651
             self.get_simple_key(b'otherchild')],
2590
2652
            [('Walking content', 0, 2),
2591
2653
             ('Walking content', 1, 2),
2592
2654
             ('Walking content', 2, 2)])
2593
2655
        # we must see child and otherchild
2594
 
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
 
2656
        self.assertTrue(lines[(b'child\n', self.get_simple_key(b'child'))] > 0)
2595
2657
        self.assertTrue(
2596
 
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
 
2658
            lines[(b'otherchild\n', self.get_simple_key(b'otherchild'))] > 0)
2597
2659
        # we dont care if we got more than that.
2598
2660
 
2599
2661
        # test all lines
2600
2662
        lines = iter_with_keys(files.keys(),
2601
 
            [('Walking content', 0, 5),
2602
 
             ('Walking content', 1, 5),
2603
 
             ('Walking content', 2, 5),
2604
 
             ('Walking content', 3, 5),
2605
 
             ('Walking content', 4, 5),
2606
 
             ('Walking content', 5, 5)])
 
2663
                               [('Walking content', 0, 5),
 
2664
                                ('Walking content', 1, 5),
 
2665
                                ('Walking content', 2, 5),
 
2666
                                ('Walking content', 3, 5),
 
2667
                                ('Walking content', 4, 5),
 
2668
                                ('Walking content', 5, 5)])
2607
2669
        # all lines must be seen at least once
2608
 
        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
2609
 
        self.assertTrue(
2610
 
            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
2611
 
        self.assertTrue(
2612
 
            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
2613
 
        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
2614
 
        self.assertTrue(
2615
 
            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
 
2670
        self.assertTrue(lines[(b'base\n', self.get_simple_key(b'base'))] > 0)
 
2671
        self.assertTrue(
 
2672
            lines[(b'lancestor\n', self.get_simple_key(b'lancestor'))] > 0)
 
2673
        self.assertTrue(
 
2674
            lines[(b'rancestor\n', self.get_simple_key(b'rancestor'))] > 0)
 
2675
        self.assertTrue(lines[(b'child\n', self.get_simple_key(b'child'))] > 0)
 
2676
        self.assertTrue(
 
2677
            lines[(b'otherchild\n', self.get_simple_key(b'otherchild'))] > 0)
2616
2678
 
2617
2679
    def test_make_mpdiffs(self):
2618
 
        from bzrlib import multiparent
 
2680
        from breezy import multiparent
2619
2681
        files = self.get_versionedfiles('source')
2620
2682
        # add texts that should trip the knit maximum delta chain threshold
2621
2683
        # as well as doing parallel chains of data in knits.
2622
2684
        # this is done by two chains of 25 insertions
2623
 
        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
2624
 
        files.add_lines(self.get_simple_key('noeol'),
2625
 
            self.get_parents([self.get_simple_key('base')]), ['line'])
 
2685
        files.add_lines(self.get_simple_key(b'base'), [], [b'line\n'])
 
2686
        files.add_lines(self.get_simple_key(b'noeol'),
 
2687
                        self.get_parents([self.get_simple_key(b'base')]), [b'line'])
2626
2688
        # detailed eol tests:
2627
2689
        # shared last line with parent no-eol
2628
 
        files.add_lines(self.get_simple_key('noeolsecond'),
2629
 
            self.get_parents([self.get_simple_key('noeol')]),
2630
 
                ['line\n', 'line'])
 
2690
        files.add_lines(self.get_simple_key(b'noeolsecond'),
 
2691
                        self.get_parents([self.get_simple_key(b'noeol')]),
 
2692
                        [b'line\n', b'line'])
2631
2693
        # differing last line with parent, both no-eol
2632
 
        files.add_lines(self.get_simple_key('noeolnotshared'),
2633
 
            self.get_parents([self.get_simple_key('noeolsecond')]),
2634
 
                ['line\n', 'phone'])
 
2694
        files.add_lines(self.get_simple_key(b'noeolnotshared'),
 
2695
                        self.get_parents(
 
2696
                            [self.get_simple_key(b'noeolsecond')]),
 
2697
                        [b'line\n', b'phone'])
2635
2698
        # add eol following a noneol parent, change content
2636
 
        files.add_lines(self.get_simple_key('eol'),
2637
 
            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
 
2699
        files.add_lines(self.get_simple_key(b'eol'),
 
2700
                        self.get_parents([self.get_simple_key(b'noeol')]), [b'phone\n'])
2638
2701
        # add eol following a noneol parent, no change content
2639
 
        files.add_lines(self.get_simple_key('eolline'),
2640
 
            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
 
2702
        files.add_lines(self.get_simple_key(b'eolline'),
 
2703
                        self.get_parents([self.get_simple_key(b'noeol')]), [b'line\n'])
2641
2704
        # noeol with no parents:
2642
 
        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
 
2705
        files.add_lines(self.get_simple_key(b'noeolbase'), [], [b'line'])
2643
2706
        # noeol preceeding its leftmost parent in the output:
2644
2707
        # this is done by making it a merge of two parents with no common
2645
2708
        # anestry: noeolbase and noeol with the
2646
2709
        # later-inserted parent the leftmost.
2647
 
        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
2648
 
            self.get_parents([self.get_simple_key('noeolbase'),
2649
 
                self.get_simple_key('noeol')]),
2650
 
            ['line'])
 
2710
        files.add_lines(self.get_simple_key(b'eolbeforefirstparent'),
 
2711
+                        self.get_parents([self.get_simple_key(b'noeolbase'),
+                                          self.get_simple_key(b'noeol')]),
+                        [b'line'])
         # two identical eol texts
-        files.add_lines(self.get_simple_key('noeoldup'),
-            self.get_parents([self.get_simple_key('noeol')]), ['line'])
-        next_parent = self.get_simple_key('base')
-        text_name = 'chain1-'
-        text = ['line\n']
-        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
-                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
-                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
-                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
-                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
-                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
-                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
-                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
-                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
-                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
-                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
-                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
-                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
-                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
-                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
-                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
-                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
-                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
-                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
-                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
-                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
-                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
-                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
-                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
-                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
-                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
+        files.add_lines(self.get_simple_key(b'noeoldup'),
+                        self.get_parents([self.get_simple_key(b'noeol')]), [b'line'])
+        next_parent = self.get_simple_key(b'base')
+        text_name = b'chain1-'
+        text = [b'line\n']
+        sha1s = {0: b'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
+                 1: b'45e21ea146a81ea44a821737acdb4f9791c8abe7',
+                 2: b'e1f11570edf3e2a070052366c582837a4fe4e9fa',
+                 3: b'26b4b8626da827088c514b8f9bbe4ebf181edda1',
+                 4: b'e28a5510be25ba84d31121cff00956f9970ae6f6',
+                 5: b'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
+                 6: b'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
+                 7: b'95c14da9cafbf828e3e74a6f016d87926ba234ab',
+                 8: b'779e9a0b28f9f832528d4b21e17e168c67697272',
+                 9: b'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
+                 10: b'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
+                 11: b'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
+                 12: b'31a2286267f24d8bedaa43355f8ad7129509ea85',
+                 13: b'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
+                 14: b'2c4b1736566b8ca6051e668de68650686a3922f2',
+                 15: b'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
+                 16: b'b0d2e18d3559a00580f6b49804c23fea500feab3',
+                 17: b'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
+                 18: b'5cf64a3459ae28efa60239e44b20312d25b253f3',
+                 19: b'1ebed371807ba5935958ad0884595126e8c4e823',
+                 20: b'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
+                 21: b'01edc447978004f6e4e962b417a4ae1955b6fe5d',
+                 22: b'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
+                 23: b'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
+                 24: b'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
+                 25: b'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                  }
         for depth in range(26):
-            new_version = self.get_simple_key(text_name + '%s' % depth)
-            text = text + ['line\n']
+            new_version = self.get_simple_key(text_name + b'%d' % depth)
+            text = text + [b'line\n']
             files.add_lines(new_version, self.get_parents([next_parent]), text)
             next_parent = new_version
-        next_parent = self.get_simple_key('base')
-        text_name = 'chain2-'
-        text = ['line\n']
+        next_parent = self.get_simple_key(b'base')
+        text_name = b'chain2-'
+        text = [b'line\n']
         for depth in range(26):
-            new_version = self.get_simple_key(text_name + '%s' % depth)
-            text = text + ['line\n']
+            new_version = self.get_simple_key(text_name + b'%d' % depth)
+            text = text + [b'line\n']
             files.add_lines(new_version, self.get_parents([next_parent]), text)
             next_parent = new_version
         target = self.get_versionedfiles('target')
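Note: the key-building change above is behavioural, not just cosmetic. Bytes %-interpolation (PEP 461) only exists on Python 3.5 and later, and an int argument needs %d there, because %s on a bytes pattern only accepts bytes-like operands. A minimal standalone sketch (variable values are illustrative):

    # %-formatting binds tighter than +, so this is text_name + (b'%d' % depth)
    text_name = b'chain1-'
    depth = 7
    assert text_name + b'%d' % depth == b'chain1-7'
    try:
        text_name + b'%s' % depth   # %s on bytes wants bytes-like, not int
    except TypeError:
        pass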
@@ -2701 +2764 @@
             target.add_mpdiffs(
                 [(key, parents, files.get_sha1s([key])[key], mpdiff)])
             self.assertEqualDiff(
-                files.get_record_stream([key], 'unordered',
-                    True).next().get_bytes_as('fulltext'),
-                target.get_record_stream([key], 'unordered',
-                    True).next().get_bytes_as('fulltext')
+                next(files.get_record_stream([key], 'unordered',
+                                             True)).get_bytes_as('fulltext'),
+                next(target.get_record_stream([key], 'unordered',
+                                              True)).get_bytes_as('fulltext')
                 )
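Note: the switch from `.next()` to `next(...)` is the standard iterator-protocol fix: Python 3 renamed the method to __next__, and the next() builtin (available since Python 2.6) works on both. A minimal sketch with a plain generator standing in for a real record stream:

    def fake_record_stream():
        # stand-in for get_record_stream(); any iterator behaves the same
        yield b'record-1'
        yield b'record-2'

    it = fake_record_stream()
    assert next(it) == b'record-1'   # portable spelling
    # it.next() raises AttributeError on Python 3: only __next__ exists there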
 
     def test_keys(self):
@@ -2713 +2776 @@
         files = self.get_versionedfiles()
         self.assertEqual(set(), set(files.keys()))
         if self.key_length == 1:
-            key = ('foo',)
+            key = (b'foo',)
         else:
-            key = ('foo', 'bar',)
+            key = (b'foo', b'bar',)
         files.add_lines(key, (), [])
-        self.assertEqual(set([key]), set(files.keys()))
+        self.assertEqual({key}, set(files.keys()))
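Note: `set([key])` to `{key}` swaps in set-literal syntax (Python 2.7 and later), which builds the same set without a throwaway list:

    key = (b'foo',)
    assert {key} == set([key])   # identical result, no intermediate list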
 
 
 class VirtualVersionedFilesTests(TestCase):
@@ -2731 +2794 @@
         return ret
 
     def setUp(self):
-        TestCase.setUp(self)
+        super(VirtualVersionedFilesTests, self).setUp()
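Note: hard-coding `TestCase.setUp(self)` bypasses every other setUp in the MRO; the cooperative super() call runs them all, which matters once test-isolation mixins are in play. A sketch of the difference, with illustrative class names:

    import unittest

    class IsolationMixin(object):
        def setUp(self):
            super(IsolationMixin, self).setUp()
            self.isolated = True

    class Good(IsolationMixin, unittest.TestCase):
        def setUp(self):
            super(Good, self).setUp()      # runs the mixin, then TestCase

    class Bad(IsolationMixin, unittest.TestCase):
        def setUp(self):
            unittest.TestCase.setUp(self)  # silently skips the mixin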
         self._lines = {}
         self._parent_map = {}
         self.texts = VirtualVersionedFiles(self._get_parent_map,
@@ -2739 +2802 @@
 
     def test_add_lines(self):
         self.assertRaises(NotImplementedError,
-                self.texts.add_lines, "foo", [], [])
+                          self.texts.add_lines, b"foo", [], [])
 
     def test_add_mpdiffs(self):
         self.assertRaises(NotImplementedError,
-                self.texts.add_mpdiffs, [])
+                          self.texts.add_mpdiffs, [])
 
     def test_check_noerrors(self):
         self.texts.check()
@@ -2753 +2816 @@
                           [])
 
     def test_get_sha1s_nonexistent(self):
-        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))
+        self.assertEqual({}, self.texts.get_sha1s([(b"NONEXISTENT",)]))
 
     def test_get_sha1s(self):
-        self._lines["key"] = ["dataline1", "dataline2"]
-        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
-                           self.texts.get_sha1s([("key",)]))
+        self._lines[b"key"] = [b"dataline1", b"dataline2"]
+        self.assertEqual({(b"key",): osutils.sha_strings(self._lines[b"key"])},
+                         self.texts.get_sha1s([(b"key",)]))
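Note: `assertEquals` is a deprecated alias of `assertEqual` (removed in recent Python releases), so the rename here is more than style. Separately, if osutils.sha_strings is, as I read it, just the SHA-1 of the concatenated lines, the expected digest can be reproduced with hashlib alone; whether the library returns it as str or bytes varies by version, so treat this sketch as an assumption:

    import hashlib

    def sha_strings(strings):
        # assumed equivalent of breezy.osutils.sha_strings
        return hashlib.sha1(b''.join(strings)).hexdigest()

    print(sha_strings([b'dataline1', b'dataline2']))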
 
     def test_get_parent_map(self):
-        self._parent_map = {"G": ("A", "B")}
-        self.assertEquals({("G",): (("A",),("B",))},
-                          self.texts.get_parent_map([("G",), ("L",)]))
+        self._parent_map = {b"G": (b"A", b"B")}
+        self.assertEqual({(b"G",): ((b"A",), (b"B",))},
+                         self.texts.get_parent_map([(b"G",), (b"L",)]))
 
     def test_get_record_stream(self):
-        self._lines["A"] = ["FOO", "BAR"]
-        it = self.texts.get_record_stream([("A",)], "unordered", True)
-        record = it.next()
-        self.assertEquals("chunked", record.storage_kind)
-        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
-        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))
+        self._lines[b"A"] = [b"FOO", b"BAR"]
+        it = self.texts.get_record_stream([(b"A",)], "unordered", True)
+        record = next(it)
+        self.assertEqual("chunked", record.storage_kind)
+        self.assertEqual(b"FOOBAR", record.get_bytes_as("fulltext"))
+        self.assertEqual([b"FOO", b"BAR"], record.get_bytes_as("chunked"))
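Note: this test pins down the contract between the two storage kinds: "chunked" hands back the list of byte chunks as-is, while "fulltext" is their concatenation. A tiny stand-in record (not the real ChunkedContentFactory API) showing just that relationship:

    class FakeChunkedRecord(object):
        # stand-in illustrating the chunked/fulltext contract only
        storage_kind = 'chunked'

        def __init__(self, chunks):
            self._chunks = chunks

        def get_bytes_as(self, kind):
            if kind == 'chunked':
                return self._chunks            # the chunks, untouched
            if kind == 'fulltext':
                return b''.join(self._chunks)  # their concatenation
            raise ValueError(kind)

    record = FakeChunkedRecord([b'FOO', b'BAR'])
    assert record.get_bytes_as('fulltext') == b'FOOBAR'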
 
     def test_get_record_stream_absent(self):
-        it = self.texts.get_record_stream([("A",)], "unordered", True)
-        record = it.next()
-        self.assertEquals("absent", record.storage_kind)
+        it = self.texts.get_record_stream([(b"A",)], "unordered", True)
+        record = next(it)
+        self.assertEqual("absent", record.storage_kind)
 
     def test_iter_lines_added_or_present_in_keys(self):
-        self._lines["A"] = ["FOO", "BAR"]
-        self._lines["B"] = ["HEY"]
-        self._lines["C"] = ["Alberta"]
-        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
-        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
-            sorted(list(it)))
+        self._lines[b"A"] = [b"FOO", b"BAR"]
+        self._lines[b"B"] = [b"HEY"]
+        self._lines[b"C"] = [b"Alberta"]
+        it = self.texts.iter_lines_added_or_present_in_keys([(b"A",), (b"B",)])
+        self.assertEqual(sorted([(b"FOO", b"A"), (b"BAR", b"A"), (b"HEY", b"B")]),
+                         sorted(list(it)))
 
 
 class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):
@@ -2792 +2855 @@
     def get_ordering_vf(self, key_priority):
         builder = self.make_branch_builder('test')
         builder.start_series()
-        builder.build_snapshot('A', None, [
-            ('add', ('', 'TREE_ROOT', 'directory', None))])
-        builder.build_snapshot('B', ['A'], [])
-        builder.build_snapshot('C', ['B'], [])
-        builder.build_snapshot('D', ['C'], [])
+        builder.build_snapshot(None, [
+            ('add', ('', b'TREE_ROOT', 'directory', None))],
+            revision_id=b'A')
+        builder.build_snapshot([b'A'], [], revision_id=b'B')
+        builder.build_snapshot([b'B'], [], revision_id=b'C')
+        builder.build_snapshot([b'C'], [], revision_id=b'D')
         builder.finish_series()
         b = builder.get_branch()
         b.lock_read()
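Note: the build_snapshot hunk is an API migration layered on the bytes conversion: judging from this diff, the newer BranchBuilder.build_snapshot takes the parent-id list first (None for a first, parentless revision) and the revision id as a revision_id= keyword, where the old form passed the revision id positionally. Sketch of the new calling shape exactly as the hunk uses it, with nothing further assumed:

    builder.build_snapshot(None, [                 # no parents: first revision
        ('add', ('', b'TREE_ROOT', 'directory', None))],
        revision_id=b'A')
    builder.build_snapshot([b'A'], [], revision_id=b'B')   # child of A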
@@ -2809 +2873 @@
         self.assertEqual([], vf.calls)
 
     def test_get_record_stream_topological(self):
-        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
-        request_keys = [('B',), ('C',), ('D',), ('A',)]
+        vf = self.get_ordering_vf(
+            {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1})
+        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
         keys = [r.key for r in vf.get_record_stream(request_keys,
-                                    'topological', False)]
+                                                    'topological', False)]
         # We should have gotten the keys in topological order
-        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
+        self.assertEqual([(b'A',), (b'B',), (b'C',), (b'D',)], keys)
         # And recorded that the request was made
         self.assertEqual([('get_record_stream', request_keys, 'topological',
                            False)], vf.calls)
 
     def test_get_record_stream_ordered(self):
-        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
-        request_keys = [('B',), ('C',), ('D',), ('A',)]
+        vf = self.get_ordering_vf(
+            {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1})
+        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
         keys = [r.key for r in vf.get_record_stream(request_keys,
-                                   'unordered', False)]
+                                                    'unordered', False)]
         # They should be returned based on their priority
-        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
+        self.assertEqual([(b'D',), (b'B',), (b'A',), (b'C',)], keys)
         # And the request recorded
         self.assertEqual([('get_record_stream', request_keys, 'unordered',
                            False)], vf.calls)
 
     def test_get_record_stream_implicit_order(self):
-        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
-        request_keys = [('B',), ('C',), ('D',), ('A',)]
+        vf = self.get_ordering_vf({(b'B',): 2, (b'D',): 1})
+        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
         keys = [r.key for r in vf.get_record_stream(request_keys,
-                                   'unordered', False)]
+                                                    'unordered', False)]
         # A and C are not in the map, so they get sorted to the front. A comes
         # before C alphabetically, so it comes back first
-        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
+        self.assertEqual([(b'A',), (b'C',), (b'D',), (b'B',)], keys)
         # And the request recorded
         self.assertEqual([('get_record_stream', request_keys, 'unordered',
                            False)], vf.calls)
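Note: taken together, these three tests pin down the decorator's 'unordered' policy: sort by the priority map, treat keys missing from the map as lowest priority (so they surface first), and break ties by the key itself. A reconstruction of that sort from the assertions alone, not the decorator's actual code:

    def prioritized(request_keys, key_priority):
        # unknown keys get priority 0, ahead of any mapped key;
        # ties fall back to alphabetical key order
        return sorted(request_keys, key=lambda k: (key_priority.get(k, 0), k))

    assert prioritized(
        [(b'B',), (b'C',), (b'D',), (b'A',)],
        {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1},
    ) == [(b'D',), (b'B',), (b'A',), (b'C',)]

    assert prioritized(
        [(b'B',), (b'C',), (b'D',), (b'A',)],
        {(b'B',): 2, (b'D',): 1},
    ) == [(b'A',), (b'C',), (b'D',), (b'B',)]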