/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

Viewing changes to bzrlib/tests/per_versionedfile.py

  • Committer: Robert Collins
  • Date: 2010-05-05 00:05:29 UTC
  • mto: This revision was merged to the branch mainline in revision 5206.
  • Revision ID: robertc@robertcollins.net-20100505000529-ltmllyms5watqj5u
Make 'pydoc bzrlib.tests.build_tree_shape' useful.

1
 
# Copyright (C) 2006-2012, 2016 Canonical Ltd
 
1
# Copyright (C) 2006-2010 Canonical Ltd
2
2
#
3
3
# Authors:
4
4
#   Johan Rydberg <jrydberg@gnu.org>
21
21
# TODO: might be nice to create a versionedfile with some type of corruption
22
22
# considered typical and check that it can be detected/corrected.
23
23
 
24
 
from gzip import GzipFile
25
 
import itertools
 
24
from itertools import chain, izip
 
25
from StringIO import StringIO
26
26
 
27
 
from .. import (
 
27
from bzrlib import (
28
28
    errors,
29
29
    graph as _mod_graph,
 
30
    groupcompress,
 
31
    knit as _mod_knit,
30
32
    osutils,
31
33
    progress,
32
 
    transport,
33
34
    ui,
34
35
    )
35
 
from ..bzr import (
36
 
    groupcompress,
37
 
    knit as _mod_knit,
38
 
    )
39
 
from ..errors import (
40
 
    RevisionNotPresent,
41
 
    RevisionAlreadyPresent,
42
 
    )
43
 
from ..bzr.knit import (
 
36
from bzrlib.errors import (
 
37
                           RevisionNotPresent,
 
38
                           RevisionAlreadyPresent,
 
39
                           WeaveParentMismatch
 
40
                           )
 
41
from bzrlib.knit import (
44
42
    cleanup_pack_knit,
45
43
    make_file_factory,
46
44
    make_pack_factory,
47
 
    )
48
 
from ..sixish import (
49
 
    BytesIO,
50
 
    zip,
51
 
    )
52
 
from . import (
 
45
    KnitAnnotateFactory,
 
46
    KnitPlainFactory,
 
47
    )
 
48
from bzrlib.tests import (
53
49
    TestCase,
54
50
    TestCaseWithMemoryTransport,
55
51
    TestNotApplicable,
56
52
    TestSkipped,
 
53
    condition_isinstance,
 
54
    split_suite_by_condition,
 
55
    multiply_tests,
57
56
    )
58
 
from .http_utils import TestCaseWithWebserver
59
 
from ..transport.memory import MemoryTransport
60
 
from ..bzr import versionedfile as versionedfile
61
 
from ..bzr.versionedfile import (
62
 
    ChunkedContentFactory,
 
57
from bzrlib.tests.http_utils import TestCaseWithWebserver
 
58
from bzrlib.trace import mutter
 
59
from bzrlib.transport import get_transport
 
60
from bzrlib.transport.memory import MemoryTransport
 
61
from bzrlib.tsort import topo_sort
 
62
from bzrlib.tuned_gzip import GzipFile
 
63
import bzrlib.versionedfile as versionedfile
 
64
from bzrlib.versionedfile import (
63
65
    ConstantMapper,
64
66
    HashEscapedPrefixMapper,
65
67
    PrefixMapper,
66
68
    VirtualVersionedFiles,
67
69
    make_versioned_files_factory,
68
70
    )
69
 
from ..bzr.weave import (
70
 
    WeaveFile,
71
 
    WeaveInvalidChecksum,
72
 
    )
73
 
from ..bzr.weavefile import write_weave
74
 
from .scenarios import load_tests_apply_scenarios
75
 
 
76
 
 
77
 
load_tests = load_tests_apply_scenarios
 
71
from bzrlib.weave import WeaveFile
 
72
from bzrlib.weavefile import read_weave, write_weave
 
73
 
 
74
 
 
75
def load_tests(standard_tests, module, loader):
 
76
    """Parameterize VersionedFiles tests for different implementations."""
 
77
    to_adapt, result = split_suite_by_condition(
 
78
        standard_tests, condition_isinstance(TestVersionedFiles))
 
79
    # We want to be sure of behaviour for:
 
80
    # weaves prefix layout (weave texts)
 
81
    # individually named weaves (weave inventories)
 
82
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
 
83
    #                   as it is the most complex mapper.
 
84
    # individually named knits
 
85
    # individual no-graph knits in packs (signatures)
 
86
    # individual graph knits in packs (inventories)
 
87
    # individual graph nocompression knits in packs (revisions)
 
88
    # plain text knits in packs (texts)
 
89
    len_one_scenarios = [
 
90
        ('weave-named', {
 
91
            'cleanup':None,
 
92
            'factory':make_versioned_files_factory(WeaveFile,
 
93
                ConstantMapper('inventory')),
 
94
            'graph':True,
 
95
            'key_length':1,
 
96
            'support_partial_insertion': False,
 
97
            }),
 
98
        ('named-knit', {
 
99
            'cleanup':None,
 
100
            'factory':make_file_factory(False, ConstantMapper('revisions')),
 
101
            'graph':True,
 
102
            'key_length':1,
 
103
            'support_partial_insertion': False,
 
104
            }),
 
105
        ('named-nograph-nodelta-knit-pack', {
 
106
            'cleanup':cleanup_pack_knit,
 
107
            'factory':make_pack_factory(False, False, 1),
 
108
            'graph':False,
 
109
            'key_length':1,
 
110
            'support_partial_insertion': False,
 
111
            }),
 
112
        ('named-graph-knit-pack', {
 
113
            'cleanup':cleanup_pack_knit,
 
114
            'factory':make_pack_factory(True, True, 1),
 
115
            'graph':True,
 
116
            'key_length':1,
 
117
            'support_partial_insertion': True,
 
118
            }),
 
119
        ('named-graph-nodelta-knit-pack', {
 
120
            'cleanup':cleanup_pack_knit,
 
121
            'factory':make_pack_factory(True, False, 1),
 
122
            'graph':True,
 
123
            'key_length':1,
 
124
            'support_partial_insertion': False,
 
125
            }),
 
126
        ('groupcompress-nograph', {
 
127
            'cleanup':groupcompress.cleanup_pack_group,
 
128
            'factory':groupcompress.make_pack_factory(False, False, 1),
 
129
            'graph': False,
 
130
            'key_length':1,
 
131
            'support_partial_insertion':False,
 
132
            }),
 
133
        ]
 
134
    len_two_scenarios = [
 
135
        ('weave-prefix', {
 
136
            'cleanup':None,
 
137
            'factory':make_versioned_files_factory(WeaveFile,
 
138
                PrefixMapper()),
 
139
            'graph':True,
 
140
            'key_length':2,
 
141
            'support_partial_insertion': False,
 
142
            }),
 
143
        ('annotated-knit-escape', {
 
144
            'cleanup':None,
 
145
            'factory':make_file_factory(True, HashEscapedPrefixMapper()),
 
146
            'graph':True,
 
147
            'key_length':2,
 
148
            'support_partial_insertion': False,
 
149
            }),
 
150
        ('plain-knit-pack', {
 
151
            'cleanup':cleanup_pack_knit,
 
152
            'factory':make_pack_factory(True, True, 2),
 
153
            'graph':True,
 
154
            'key_length':2,
 
155
            'support_partial_insertion': True,
 
156
            }),
 
157
        ('groupcompress', {
 
158
            'cleanup':groupcompress.cleanup_pack_group,
 
159
            'factory':groupcompress.make_pack_factory(True, False, 1),
 
160
            'graph': True,
 
161
            'key_length':1,
 
162
            'support_partial_insertion':False,
 
163
            }),
 
164
        ]
 
165
    scenarios = len_one_scenarios + len_two_scenarios
 
166
    return multiply_tests(to_adapt, scenarios, result)
78
167
 
79
168
 
80
169
def get_diamond_vf(f, trailing_eol=True, left_only=False):
83
172
    :param trailing_eol: If True end the last line with \n.
84
173
    """
85
174
    parents = {
86
 
        b'origin': (),
87
 
        b'base': ((b'origin',),),
88
 
        b'left': ((b'base',),),
89
 
        b'right': ((b'base',),),
90
 
        b'merged': ((b'left',), (b'right',)),
 
175
        'origin': (),
 
176
        'base': (('origin',),),
 
177
        'left': (('base',),),
 
178
        'right': (('base',),),
 
179
        'merged': (('left',), ('right',)),
91
180
        }
92
181
    # insert a diamond graph to exercise deltas and merges.
93
182
    if trailing_eol:
94
 
        last_char = b'\n'
 
183
        last_char = '\n'
95
184
    else:
96
 
        last_char = b''
97
 
    f.add_lines(b'origin', [], [b'origin' + last_char])
98
 
    f.add_lines(b'base', [b'origin'], [b'base' + last_char])
99
 
    f.add_lines(b'left', [b'base'], [b'base\n', b'left' + last_char])
 
185
        last_char = ''
 
186
    f.add_lines('origin', [], ['origin' + last_char])
 
187
    f.add_lines('base', ['origin'], ['base' + last_char])
 
188
    f.add_lines('left', ['base'], ['base\n', 'left' + last_char])
100
189
    if not left_only:
101
 
        f.add_lines(b'right', [b'base'],
102
 
                    [b'base\n', b'right' + last_char])
103
 
        f.add_lines(b'merged', [b'left', b'right'],
104
 
                    [b'base\n', b'left\n', b'right\n', b'merged' + last_char])
 
190
        f.add_lines('right', ['base'],
 
191
            ['base\n', 'right' + last_char])
 
192
        f.add_lines('merged', ['left', 'right'],
 
193
            ['base\n', 'left\n', 'right\n', 'merged' + last_char])
105
194
    return f, parents
106
195
 
107
196
 
108
197
def get_diamond_files(files, key_length, trailing_eol=True, left_only=False,
109
 
                      nograph=False, nokeys=False):
 
198
    nograph=False, nokeys=False):
110
199
    """Get a diamond graph to exercise deltas and merges.
111
200
 
112
201
    This creates a 5-node graph in files. If files supports 2-length keys two
128
217
    if key_length == 1:
129
218
        prefixes = [()]
130
219
    else:
131
 
        prefixes = [(b'FileA',), (b'FileB',)]
 
220
        prefixes = [('FileA',), ('FileB',)]
132
221
    # insert a diamond graph to exercise deltas and merges.
133
222
    if trailing_eol:
134
 
        last_char = b'\n'
 
223
        last_char = '\n'
135
224
    else:
136
 
        last_char = b''
 
225
        last_char = ''
137
226
    result = []
138
 
 
139
227
    def get_parents(suffix_list):
140
228
        if nograph:
141
229
            return ()
142
230
        else:
143
231
            result = [prefix + suffix for suffix in suffix_list]
144
232
            return result
145
 
 
146
233
    def get_key(suffix):
147
234
        if nokeys:
148
235
            return (None, )
151
238
    # we loop over each key because that spreads the inserts across prefixes,
152
239
    # which is how commit operates.
153
240
    for prefix in prefixes:
154
 
        result.append(files.add_lines(prefix + get_key(b'origin'), (),
155
 
                                      [b'origin' + last_char]))
156
 
    for prefix in prefixes:
157
 
        result.append(files.add_lines(prefix + get_key(b'base'),
158
 
                                      get_parents([(b'origin',)]), [b'base' + last_char]))
159
 
    for prefix in prefixes:
160
 
        result.append(files.add_lines(prefix + get_key(b'left'),
161
 
                                      get_parents([(b'base',)]),
162
 
                                      [b'base\n', b'left' + last_char]))
 
241
        result.append(files.add_lines(prefix + get_key('origin'), (),
 
242
            ['origin' + last_char]))
 
243
    for prefix in prefixes:
 
244
        result.append(files.add_lines(prefix + get_key('base'),
 
245
            get_parents([('origin',)]), ['base' + last_char]))
 
246
    for prefix in prefixes:
 
247
        result.append(files.add_lines(prefix + get_key('left'),
 
248
            get_parents([('base',)]),
 
249
            ['base\n', 'left' + last_char]))
163
250
    if not left_only:
164
251
        for prefix in prefixes:
165
 
            result.append(files.add_lines(prefix + get_key(b'right'),
166
 
                                          get_parents([(b'base',)]),
167
 
                                          [b'base\n', b'right' + last_char]))
 
252
            result.append(files.add_lines(prefix + get_key('right'),
 
253
                get_parents([('base',)]),
 
254
                ['base\n', 'right' + last_char]))
168
255
        for prefix in prefixes:
169
 
            result.append(files.add_lines(prefix + get_key(b'merged'),
170
 
                                          get_parents(
171
 
                                              [(b'left',), (b'right',)]),
172
 
                                          [b'base\n', b'left\n', b'right\n', b'merged' + last_char]))
 
256
            result.append(files.add_lines(prefix + get_key('merged'),
 
257
                get_parents([('left',), ('right',)]),
 
258
                ['base\n', 'left\n', 'right\n', 'merged' + last_char]))
173
259
    return result
174
260
 
175
261
 
188
274
 
189
275
    def test_add(self):
190
276
        f = self.get_file()
191
 
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
192
 
        f.add_lines(b'r1', [b'r0'], [b'b\n', b'c\n'])
193
 
 
 
277
        f.add_lines('r0', [], ['a\n', 'b\n'])
 
278
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
194
279
        def verify_file(f):
195
280
            versions = f.versions()
196
 
            self.assertTrue(b'r0' in versions)
197
 
            self.assertTrue(b'r1' in versions)
198
 
            self.assertEqual(f.get_lines(b'r0'), [b'a\n', b'b\n'])
199
 
            self.assertEqual(f.get_text(b'r0'), b'a\nb\n')
200
 
            self.assertEqual(f.get_lines(b'r1'), [b'b\n', b'c\n'])
 
281
            self.assertTrue('r0' in versions)
 
282
            self.assertTrue('r1' in versions)
 
283
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
 
284
            self.assertEquals(f.get_text('r0'), 'a\nb\n')
 
285
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
201
286
            self.assertEqual(2, len(f))
202
287
            self.assertEqual(2, f.num_versions())
203
288
 
204
289
            self.assertRaises(RevisionNotPresent,
205
 
                              f.add_lines, b'r2', [b'foo'], [])
 
290
                f.add_lines, 'r2', ['foo'], [])
206
291
            self.assertRaises(RevisionAlreadyPresent,
207
 
                              f.add_lines, b'r1', [], [])
 
292
                f.add_lines, 'r1', [], [])
208
293
        verify_file(f)
209
294
        # this checks that reopen with create=True does not break anything.
210
295
        f = self.reopen_file(create=True)
213
298
    def test_adds_with_parent_texts(self):
214
299
        f = self.get_file()
215
300
        parent_texts = {}
216
 
        _, _, parent_texts[b'r0'] = f.add_lines(b'r0', [], [b'a\n', b'b\n'])
 
301
        _, _, parent_texts['r0'] = f.add_lines('r0', [], ['a\n', 'b\n'])
217
302
        try:
218
 
            _, _, parent_texts[b'r1'] = f.add_lines_with_ghosts(b'r1',
219
 
                                                                [b'r0', b'ghost'], [b'b\n', b'c\n'], parent_texts=parent_texts)
 
303
            _, _, parent_texts['r1'] = f.add_lines_with_ghosts('r1',
 
304
                ['r0', 'ghost'], ['b\n', 'c\n'], parent_texts=parent_texts)
220
305
        except NotImplementedError:
221
306
            # if the format doesn't support ghosts, just add normally.
222
 
            _, _, parent_texts[b'r1'] = f.add_lines(b'r1',
223
 
                                                    [b'r0'], [b'b\n', b'c\n'], parent_texts=parent_texts)
224
 
        f.add_lines(b'r2', [b'r1'], [b'c\n', b'd\n'],
225
 
                    parent_texts=parent_texts)
226
 
        self.assertNotEqual(None, parent_texts[b'r0'])
227
 
        self.assertNotEqual(None, parent_texts[b'r1'])
228
 
 
 
307
            _, _, parent_texts['r1'] = f.add_lines('r1',
 
308
                ['r0'], ['b\n', 'c\n'], parent_texts=parent_texts)
 
309
        f.add_lines('r2', ['r1'], ['c\n', 'd\n'], parent_texts=parent_texts)
 
310
        self.assertNotEqual(None, parent_texts['r0'])
 
311
        self.assertNotEqual(None, parent_texts['r1'])
229
312
        def verify_file(f):
230
313
            versions = f.versions()
231
 
            self.assertTrue(b'r0' in versions)
232
 
            self.assertTrue(b'r1' in versions)
233
 
            self.assertTrue(b'r2' in versions)
234
 
            self.assertEqual(f.get_lines(b'r0'), [b'a\n', b'b\n'])
235
 
            self.assertEqual(f.get_lines(b'r1'), [b'b\n', b'c\n'])
236
 
            self.assertEqual(f.get_lines(b'r2'), [b'c\n', b'd\n'])
 
314
            self.assertTrue('r0' in versions)
 
315
            self.assertTrue('r1' in versions)
 
316
            self.assertTrue('r2' in versions)
 
317
            self.assertEquals(f.get_lines('r0'), ['a\n', 'b\n'])
 
318
            self.assertEquals(f.get_lines('r1'), ['b\n', 'c\n'])
 
319
            self.assertEquals(f.get_lines('r2'), ['c\n', 'd\n'])
237
320
            self.assertEqual(3, f.num_versions())
238
 
            origins = f.annotate(b'r1')
239
 
            self.assertEqual(origins[0][0], b'r0')
240
 
            self.assertEqual(origins[1][0], b'r1')
241
 
            origins = f.annotate(b'r2')
242
 
            self.assertEqual(origins[0][0], b'r1')
243
 
            self.assertEqual(origins[1][0], b'r2')
 
321
            origins = f.annotate('r1')
 
322
            self.assertEquals(origins[0][0], 'r0')
 
323
            self.assertEquals(origins[1][0], 'r1')
 
324
            origins = f.annotate('r2')
 
325
            self.assertEquals(origins[0][0], 'r1')
 
326
            self.assertEquals(origins[1][0], 'r2')
244
327
 
245
328
        verify_file(f)
246
329
        f = self.reopen_file()
251
334
        # versioned files version sequences of bytes only.
252
335
        vf = self.get_file()
253
336
        self.assertRaises(errors.BzrBadParameterUnicode,
254
 
                          vf.add_lines, b'a', [], [b'a\n', u'b\n', b'c\n'])
 
337
            vf.add_lines, 'a', [], ['a\n', u'b\n', 'c\n'])
255
338
        self.assertRaises(
256
339
            (errors.BzrBadParameterUnicode, NotImplementedError),
257
 
            vf.add_lines_with_ghosts, b'a', [], [b'a\n', u'b\n', b'c\n'])
 
340
            vf.add_lines_with_ghosts, 'a', [], ['a\n', u'b\n', 'c\n'])
258
341
 
259
342
    def test_add_follows_left_matching_blocks(self):
260
343
        """If we change left_matching_blocks, delta changes
265
348
        vf = self.get_file()
266
349
        if isinstance(vf, WeaveFile):
267
350
            raise TestSkipped("WeaveFile ignores left_matching_blocks")
268
 
        vf.add_lines(b'1', [], [b'a\n'])
269
 
        vf.add_lines(b'2', [b'1'], [b'a\n', b'a\n', b'a\n'],
 
351
        vf.add_lines('1', [], ['a\n'])
 
352
        vf.add_lines('2', ['1'], ['a\n', 'a\n', 'a\n'],
270
353
                     left_matching_blocks=[(0, 0, 1), (1, 3, 0)])
271
 
        self.assertEqual([b'a\n', b'a\n', b'a\n'], vf.get_lines(b'2'))
272
 
        vf.add_lines(b'3', [b'1'], [b'a\n', b'a\n', b'a\n'],
 
354
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('2'))
 
355
        vf.add_lines('3', ['1'], ['a\n', 'a\n', 'a\n'],
273
356
                     left_matching_blocks=[(0, 2, 1), (1, 3, 0)])
274
 
        self.assertEqual([b'a\n', b'a\n', b'a\n'], vf.get_lines(b'3'))
 
357
        self.assertEqual(['a\n', 'a\n', 'a\n'], vf.get_lines('3'))
275
358
 
276
359
    def test_inline_newline_throws(self):
277
360
        # \r characters are not permitted in lines being added
278
361
        vf = self.get_file()
279
362
        self.assertRaises(errors.BzrBadParameterContainsNewline,
280
 
                          vf.add_lines, b'a', [], [b'a\n\n'])
 
363
            vf.add_lines, 'a', [], ['a\n\n'])
281
364
        self.assertRaises(
282
365
            (errors.BzrBadParameterContainsNewline, NotImplementedError),
283
 
            vf.add_lines_with_ghosts, b'a', [], [b'a\n\n'])
 
366
            vf.add_lines_with_ghosts, 'a', [], ['a\n\n'])
284
367
        # but inline CR's are allowed
285
 
        vf.add_lines(b'a', [], [b'a\r\n'])
 
368
        vf.add_lines('a', [], ['a\r\n'])
286
369
        try:
287
 
            vf.add_lines_with_ghosts(b'b', [], [b'a\r\n'])
 
370
            vf.add_lines_with_ghosts('b', [], ['a\r\n'])
288
371
        except NotImplementedError:
289
372
            pass
290
373
 
291
374
    def test_add_reserved(self):
292
375
        vf = self.get_file()
293
376
        self.assertRaises(errors.ReservedId,
294
 
                          vf.add_lines, b'a:', [], [b'a\n', b'b\n', b'c\n'])
 
377
            vf.add_lines, 'a:', [], ['a\n', 'b\n', 'c\n'])
295
378
 
296
379
    def test_add_lines_nostoresha(self):
297
380
        """When nostore_sha is supplied using old content raises."""
298
381
        vf = self.get_file()
299
 
        empty_text = (b'a', [])
300
 
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
301
 
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
 
382
        empty_text = ('a', [])
 
383
        sample_text_nl = ('b', ["foo\n", "bar\n"])
 
384
        sample_text_no_nl = ('c', ["foo\n", "bar"])
302
385
        shas = []
303
386
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
304
387
            sha, _, _ = vf.add_lines(version, [], lines)
305
388
            shas.append(sha)
306
389
        # we now have a copy of all the lines in the vf.
307
390
        for sha, (version, lines) in zip(
308
 
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
 
391
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
309
392
            self.assertRaises(errors.ExistingContent,
310
 
                              vf.add_lines, version + b"2", [], lines,
311
 
                              nostore_sha=sha)
 
393
                vf.add_lines, version + "2", [], lines,
 
394
                nostore_sha=sha)
312
395
            # and no new version should have been added.
313
396
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
314
 
                              version + b"2")
 
397
                version + "2")
315
398
 
316
399
    def test_add_lines_with_ghosts_nostoresha(self):
317
400
        """When nostore_sha is supplied using old content raises."""
318
401
        vf = self.get_file()
319
 
        empty_text = (b'a', [])
320
 
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
321
 
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
 
402
        empty_text = ('a', [])
 
403
        sample_text_nl = ('b', ["foo\n", "bar\n"])
 
404
        sample_text_no_nl = ('c', ["foo\n", "bar"])
322
405
        shas = []
323
406
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
324
407
            sha, _, _ = vf.add_lines(version, [], lines)
326
409
        # we now have a copy of all the lines in the vf.
327
410
        # is the test applicable to this vf implementation?
328
411
        try:
329
 
            vf.add_lines_with_ghosts(b'd', [], [])
 
412
            vf.add_lines_with_ghosts('d', [], [])
330
413
        except NotImplementedError:
331
414
            raise TestSkipped("add_lines_with_ghosts is optional")
332
415
        for sha, (version, lines) in zip(
333
 
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
 
416
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
334
417
            self.assertRaises(errors.ExistingContent,
335
 
                              vf.add_lines_with_ghosts, version + b"2", [], lines,
336
 
                              nostore_sha=sha)
 
418
                vf.add_lines_with_ghosts, version + "2", [], lines,
 
419
                nostore_sha=sha)
337
420
            # and no new version should have been added.
338
421
            self.assertRaises(errors.RevisionNotPresent, vf.get_lines,
339
 
                              version + b"2")
 
422
                version + "2")
340
423
 
341
424
    def test_add_lines_return_value(self):
342
425
        # add_lines should return the sha1 and the text size.
343
426
        vf = self.get_file()
344
 
        empty_text = (b'a', [])
345
 
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
346
 
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
 
427
        empty_text = ('a', [])
 
428
        sample_text_nl = ('b', ["foo\n", "bar\n"])
 
429
        sample_text_no_nl = ('c', ["foo\n", "bar"])
347
430
        # check results for the three cases:
348
431
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
349
432
            # the first two elements are the same for all versioned files:
352
435
            result = vf.add_lines(version, [], lines)
353
436
            self.assertEqual(3, len(result))
354
437
            self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
355
 
                             result[0:2])
 
438
                result[0:2])
356
439
        # parents should not affect the result:
357
440
        lines = sample_text_nl[1]
358
441
        self.assertEqual((osutils.sha_strings(lines), sum(map(len, lines))),
359
 
                         vf.add_lines(b'd', [b'b', b'c'], lines)[0:2])
 
442
            vf.add_lines('d', ['b', 'c'], lines)[0:2])
360
443
 
361
444
    def test_get_reserved(self):
362
445
        vf = self.get_file()
363
 
        self.assertRaises(errors.ReservedId, vf.get_texts, [b'b:'])
364
 
        self.assertRaises(errors.ReservedId, vf.get_lines, b'b:')
365
 
        self.assertRaises(errors.ReservedId, vf.get_text, b'b:')
 
446
        self.assertRaises(errors.ReservedId, vf.get_texts, ['b:'])
 
447
        self.assertRaises(errors.ReservedId, vf.get_lines, 'b:')
 
448
        self.assertRaises(errors.ReservedId, vf.get_text, 'b:')
366
449
 
367
450
    def test_add_unchanged_last_line_noeol_snapshot(self):
368
451
        """Add a text with an unchanged last line with no eol should work."""
377
460
        for length in range(20):
378
461
            version_lines = {}
379
462
            vf = self.get_file('case-%d' % length)
380
 
            prefix = b'step-%d'
 
463
            prefix = 'step-%d'
381
464
            parents = []
382
465
            for step in range(length):
383
466
                version = prefix % step
384
 
                lines = ([b'prelude \n'] * step) + [b'line']
 
467
                lines = (['prelude \n'] * step) + ['line']
385
468
                vf.add_lines(version, parents, lines)
386
469
                version_lines[version] = lines
387
470
                parents = [version]
388
 
            vf.add_lines(b'no-eol', parents, [b'line'])
 
471
            vf.add_lines('no-eol', parents, ['line'])
389
472
            vf.get_texts(version_lines.keys())
390
 
            self.assertEqualDiff(b'line', vf.get_text(b'no-eol'))
 
473
            self.assertEqualDiff('line', vf.get_text('no-eol'))
391
474
 
392
475
    def test_get_texts_eol_variation(self):
393
476
        # similar to the failure in <http://bugs.launchpad.net/234748>
394
477
        vf = self.get_file()
395
 
        sample_text_nl = [b"line\n"]
396
 
        sample_text_no_nl = [b"line"]
 
478
        sample_text_nl = ["line\n"]
 
479
        sample_text_no_nl = ["line"]
397
480
        versions = []
398
481
        version_lines = {}
399
482
        parents = []
400
483
        for i in range(4):
401
 
            version = b'v%d' % i
 
484
            version = 'v%d' % i
402
485
            if i % 2:
403
486
                lines = sample_text_nl
404
487
            else:
410
493
            # (which is what this test tests) will generate a correct line
411
494
            # delta (which is to say, an empty delta).
412
495
            vf.add_lines(version, parents, lines,
413
 
                         left_matching_blocks=[(0, 0, 1)])
 
496
                left_matching_blocks=[(0, 0, 1)])
414
497
            parents = [version]
415
498
            versions.append(version)
416
499
            version_lines[version] = lines
420
503
 
421
504
    def test_add_lines_with_matching_blocks_noeol_last_line(self):
422
505
        """Add a text with an unchanged last line with no eol should work."""
423
 
        from breezy import multiparent
 
506
        from bzrlib import multiparent
424
507
        # Hand verified sha1 of the text we're adding.
425
508
        sha1 = '6a1d115ec7b60afb664dc14890b5af5ce3c827a4'
426
509
        # Create a mpdiff which adds a new line before the trailing line, and
428
511
        # Test adding this in two situations:
429
512
        # On top of a new insertion
430
513
        vf = self.get_file('fulltext')
431
 
        vf.add_lines(b'noeol', [], [b'line'])
432
 
        vf.add_lines(b'noeol2', [b'noeol'], [b'newline\n', b'line'],
433
 
                     left_matching_blocks=[(0, 1, 1)])
434
 
        self.assertEqualDiff(b'newline\nline', vf.get_text(b'noeol2'))
 
514
        vf.add_lines('noeol', [], ['line'])
 
515
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
 
516
            left_matching_blocks=[(0, 1, 1)])
 
517
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
435
518
        # On top of a delta
436
519
        vf = self.get_file('delta')
437
 
        vf.add_lines(b'base', [], [b'line'])
438
 
        vf.add_lines(b'noeol', [b'base'], [b'prelude\n', b'line'])
439
 
        vf.add_lines(b'noeol2', [b'noeol'], [b'newline\n', b'line'],
440
 
                     left_matching_blocks=[(1, 1, 1)])
441
 
        self.assertEqualDiff(b'newline\nline', vf.get_text(b'noeol2'))
 
520
        vf.add_lines('base', [], ['line'])
 
521
        vf.add_lines('noeol', ['base'], ['prelude\n', 'line'])
 
522
        vf.add_lines('noeol2', ['noeol'], ['newline\n', 'line'],
 
523
            left_matching_blocks=[(1, 1, 1)])
 
524
        self.assertEqualDiff('newline\nline', vf.get_text('noeol2'))
442
525
 
443
526
    def test_make_mpdiffs(self):
444
 
        from breezy import multiparent
 
527
        from bzrlib import multiparent
445
528
        vf = self.get_file('foo')
446
529
        sha1s = self._setup_for_deltas(vf)
447
530
        new_vf = self.get_file('bar')
455
538
    def test_make_mpdiffs_with_ghosts(self):
456
539
        vf = self.get_file('foo')
457
540
        try:
458
 
            vf.add_lines_with_ghosts(b'text', [b'ghost'], [b'line\n'])
 
541
            vf.add_lines_with_ghosts('text', ['ghost'], ['line\n'])
459
542
        except NotImplementedError:
460
543
            # old Weave formats do not allow ghosts
461
544
            return
462
 
        self.assertRaises(errors.RevisionNotPresent,
463
 
                          vf.make_mpdiffs, [b'ghost'])
 
545
        self.assertRaises(errors.RevisionNotPresent, vf.make_mpdiffs, ['ghost'])
464
546
 
465
547
    def _setup_for_deltas(self, f):
466
548
        self.assertFalse(f.has_version('base'))
467
549
        # add texts that should trip the knit maximum delta chain threshold
468
550
        # as well as doing parallel chains of data in knits.
469
551
        # this is done by two chains of 25 insertions
470
 
        f.add_lines(b'base', [], [b'line\n'])
471
 
        f.add_lines(b'noeol', [b'base'], [b'line'])
 
552
        f.add_lines('base', [], ['line\n'])
 
553
        f.add_lines('noeol', ['base'], ['line'])
472
554
        # detailed eol tests:
473
555
        # shared last line with parent no-eol
474
 
        f.add_lines(b'noeolsecond', [b'noeol'], [b'line\n', b'line'])
 
556
        f.add_lines('noeolsecond', ['noeol'], ['line\n', 'line'])
475
557
        # differing last line with parent, both no-eol
476
 
        f.add_lines(b'noeolnotshared', [b'noeolsecond'], [b'line\n', b'phone'])
 
558
        f.add_lines('noeolnotshared', ['noeolsecond'], ['line\n', 'phone'])
477
559
        # add eol following a noneol parent, change content
478
 
        f.add_lines(b'eol', [b'noeol'], [b'phone\n'])
 
560
        f.add_lines('eol', ['noeol'], ['phone\n'])
479
561
        # add eol following a noneol parent, no change content
480
 
        f.add_lines(b'eolline', [b'noeol'], [b'line\n'])
 
562
        f.add_lines('eolline', ['noeol'], ['line\n'])
481
563
        # noeol with no parents:
482
 
        f.add_lines(b'noeolbase', [], [b'line'])
 
564
        f.add_lines('noeolbase', [], ['line'])
483
565
        # noeol preceeding its leftmost parent in the output:
484
566
        # this is done by making it a merge of two parents with no common
485
567
        # anestry: noeolbase and noeol with the
486
568
        # later-inserted parent the leftmost.
487
 
        f.add_lines(b'eolbeforefirstparent', [
488
 
                    b'noeolbase', b'noeol'], [b'line'])
 
569
        f.add_lines('eolbeforefirstparent', ['noeolbase', 'noeol'], ['line'])
489
570
        # two identical eol texts
490
 
        f.add_lines(b'noeoldup', [b'noeol'], [b'line'])
491
 
        next_parent = b'base'
492
 
        text_name = b'chain1-'
493
 
        text = [b'line\n']
494
 
        sha1s = {0: b'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
495
 
                 1: b'45e21ea146a81ea44a821737acdb4f9791c8abe7',
496
 
                 2: b'e1f11570edf3e2a070052366c582837a4fe4e9fa',
497
 
                 3: b'26b4b8626da827088c514b8f9bbe4ebf181edda1',
498
 
                 4: b'e28a5510be25ba84d31121cff00956f9970ae6f6',
499
 
                 5: b'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
500
 
                 6: b'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
501
 
                 7: b'95c14da9cafbf828e3e74a6f016d87926ba234ab',
502
 
                 8: b'779e9a0b28f9f832528d4b21e17e168c67697272',
503
 
                 9: b'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
504
 
                 10: b'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
505
 
                 11: b'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
506
 
                 12: b'31a2286267f24d8bedaa43355f8ad7129509ea85',
507
 
                 13: b'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
508
 
                 14: b'2c4b1736566b8ca6051e668de68650686a3922f2',
509
 
                 15: b'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
510
 
                 16: b'b0d2e18d3559a00580f6b49804c23fea500feab3',
511
 
                 17: b'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
512
 
                 18: b'5cf64a3459ae28efa60239e44b20312d25b253f3',
513
 
                 19: b'1ebed371807ba5935958ad0884595126e8c4e823',
514
 
                 20: b'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
515
 
                 21: b'01edc447978004f6e4e962b417a4ae1955b6fe5d',
516
 
                 22: b'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
517
 
                 23: b'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
518
 
                 24: b'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
519
 
                 25: b'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
 
571
        f.add_lines('noeoldup', ['noeol'], ['line'])
 
572
        next_parent = 'base'
 
573
        text_name = 'chain1-'
 
574
        text = ['line\n']
 
575
        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
 
576
                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
 
577
                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
 
578
                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
 
579
                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
 
580
                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
 
581
                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
 
582
                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
 
583
                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
 
584
                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
 
585
                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
 
586
                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
 
587
                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
 
588
                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
 
589
                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
 
590
                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
 
591
                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
 
592
                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
 
593
                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
 
594
                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
 
595
                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
 
596
                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
 
597
                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
 
598
                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
 
599
                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
 
600
                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
520
601
                 }
521
602
        for depth in range(26):
522
 
            new_version = text_name + b'%d' % depth
523
 
            text = text + [b'line\n']
 
603
            new_version = text_name + '%s' % depth
 
604
            text = text + ['line\n']
524
605
            f.add_lines(new_version, [next_parent], text)
525
606
            next_parent = new_version
526
 
        next_parent = b'base'
527
 
        text_name = b'chain2-'
528
 
        text = [b'line\n']
 
607
        next_parent = 'base'
 
608
        text_name = 'chain2-'
 
609
        text = ['line\n']
529
610
        for depth in range(26):
530
 
            new_version = text_name + b'%d' % depth
531
 
            text = text + [b'line\n']
 
611
            new_version = text_name + '%s' % depth
 
612
            text = text + ['line\n']
532
613
            f.add_lines(new_version, [next_parent], text)
533
614
            next_parent = new_version
534
615
        return sha1s
536
617
    def test_ancestry(self):
537
618
        f = self.get_file()
538
619
        self.assertEqual([], f.get_ancestry([]))
539
 
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
540
 
        f.add_lines(b'r1', [b'r0'], [b'b\n', b'c\n'])
541
 
        f.add_lines(b'r2', [b'r0'], [b'b\n', b'c\n'])
542
 
        f.add_lines(b'r3', [b'r2'], [b'b\n', b'c\n'])
543
 
        f.add_lines(b'rM', [b'r1', b'r2'], [b'b\n', b'c\n'])
 
620
        f.add_lines('r0', [], ['a\n', 'b\n'])
 
621
        f.add_lines('r1', ['r0'], ['b\n', 'c\n'])
 
622
        f.add_lines('r2', ['r0'], ['b\n', 'c\n'])
 
623
        f.add_lines('r3', ['r2'], ['b\n', 'c\n'])
 
624
        f.add_lines('rM', ['r1', 'r2'], ['b\n', 'c\n'])
544
625
        self.assertEqual([], f.get_ancestry([]))
545
 
        versions = f.get_ancestry([b'rM'])
 
626
        versions = f.get_ancestry(['rM'])
546
627
        # there are some possibilities:
547
628
        # r0 r1 r2 rM r3
548
629
        # r0 r1 r2 r3 rM
549
630
        # etc
550
631
        # so we check indexes
551
 
        r0 = versions.index(b'r0')
552
 
        r1 = versions.index(b'r1')
553
 
        r2 = versions.index(b'r2')
554
 
        self.assertFalse(b'r3' in versions)
555
 
        rM = versions.index(b'rM')
 
632
        r0 = versions.index('r0')
 
633
        r1 = versions.index('r1')
 
634
        r2 = versions.index('r2')
 
635
        self.assertFalse('r3' in versions)
 
636
        rM = versions.index('rM')
556
637
        self.assertTrue(r0 < r1)
557
638
        self.assertTrue(r0 < r2)
558
639
        self.assertTrue(r1 < rM)
559
640
        self.assertTrue(r2 < rM)
560
641
 
561
642
        self.assertRaises(RevisionNotPresent,
562
 
                          f.get_ancestry, [b'rM', b'rX'])
 
643
            f.get_ancestry, ['rM', 'rX'])
563
644
 
564
 
        self.assertEqual(set(f.get_ancestry(b'rM')),
565
 
                         set(f.get_ancestry(b'rM', topo_sorted=False)))
 
645
        self.assertEqual(set(f.get_ancestry('rM')),
 
646
            set(f.get_ancestry('rM', topo_sorted=False)))
566
647
 
567
648
    def test_mutate_after_finish(self):
568
649
        self._transaction = 'before'
569
650
        f = self.get_file()
570
651
        self._transaction = 'after'
571
 
        self.assertRaises(errors.OutSideTransaction, f.add_lines, b'', [], [])
572
 
        self.assertRaises(errors.OutSideTransaction,
573
 
                          f.add_lines_with_ghosts, b'', [], [])
 
652
        self.assertRaises(errors.OutSideTransaction, f.add_lines, '', [], [])
 
653
        self.assertRaises(errors.OutSideTransaction, f.add_lines_with_ghosts, '', [], [])
574
654
 
575
655
    def test_copy_to(self):
576
656
        f = self.get_file()
577
 
        f.add_lines(b'0', [], [b'a\n'])
 
657
        f.add_lines('0', [], ['a\n'])
578
658
        t = MemoryTransport()
579
659
        f.copy_to('foo', t)
580
660
        for suffix in self.get_factory().get_suffixes():
587
667
 
588
668
    def test_get_parent_map(self):
589
669
        f = self.get_file()
590
 
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
591
 
        self.assertEqual(
592
 
            {b'r0': ()}, f.get_parent_map([b'r0']))
593
 
        f.add_lines(b'r1', [b'r0'], [b'a\n', b'b\n'])
594
 
        self.assertEqual(
595
 
            {b'r1': (b'r0',)}, f.get_parent_map([b'r1']))
596
 
        self.assertEqual(
597
 
            {b'r0': (),
598
 
             b'r1': (b'r0',)},
599
 
            f.get_parent_map([b'r0', b'r1']))
600
 
        f.add_lines(b'r2', [], [b'a\n', b'b\n'])
601
 
        f.add_lines(b'r3', [], [b'a\n', b'b\n'])
602
 
        f.add_lines(b'm', [b'r0', b'r1', b'r2', b'r3'], [b'a\n', b'b\n'])
603
 
        self.assertEqual(
604
 
            {b'm': (b'r0', b'r1', b'r2', b'r3')}, f.get_parent_map([b'm']))
605
 
        self.assertEqual({}, f.get_parent_map(b'y'))
606
 
        self.assertEqual(
607
 
            {b'r0': (),
608
 
             b'r1': (b'r0',)},
609
 
            f.get_parent_map([b'r0', b'y', b'r1']))
 
670
        f.add_lines('r0', [], ['a\n', 'b\n'])
 
671
        self.assertEqual(
 
672
            {'r0':()}, f.get_parent_map(['r0']))
 
673
        f.add_lines('r1', ['r0'], ['a\n', 'b\n'])
 
674
        self.assertEqual(
 
675
            {'r1':('r0',)}, f.get_parent_map(['r1']))
 
676
        self.assertEqual(
 
677
            {'r0':(),
 
678
             'r1':('r0',)},
 
679
            f.get_parent_map(['r0', 'r1']))
 
680
        f.add_lines('r2', [], ['a\n', 'b\n'])
 
681
        f.add_lines('r3', [], ['a\n', 'b\n'])
 
682
        f.add_lines('m', ['r0', 'r1', 'r2', 'r3'], ['a\n', 'b\n'])
 
683
        self.assertEqual(
 
684
            {'m':('r0', 'r1', 'r2', 'r3')}, f.get_parent_map(['m']))
 
685
        self.assertEqual({}, f.get_parent_map('y'))
 
686
        self.assertEqual(
 
687
            {'r0':(),
 
688
             'r1':('r0',)},
 
689
            f.get_parent_map(['r0', 'y', 'r1']))
610
690
 
611
691
    def test_annotate(self):
612
692
        f = self.get_file()
613
 
        f.add_lines(b'r0', [], [b'a\n', b'b\n'])
614
 
        f.add_lines(b'r1', [b'r0'], [b'c\n', b'b\n'])
615
 
        origins = f.annotate(b'r1')
616
 
        self.assertEqual(origins[0][0], b'r1')
617
 
        self.assertEqual(origins[1][0], b'r0')
 
693
        f.add_lines('r0', [], ['a\n', 'b\n'])
 
694
        f.add_lines('r1', ['r0'], ['c\n', 'b\n'])
 
695
        origins = f.annotate('r1')
 
696
        self.assertEquals(origins[0][0], 'r1')
 
697
        self.assertEquals(origins[1][0], 'r0')
618
698
 
619
699
        self.assertRaises(RevisionNotPresent,
620
 
                          f.annotate, b'foo')
 
700
            f.annotate, 'foo')
621
701
 
622
702
    def test_detection(self):
623
703
        # Test weaves detect corruption.
628
708
 
629
709
        w = self.get_file_corrupted_text()
630
710
 
631
 
        self.assertEqual(b'hello\n', w.get_text(b'v1'))
632
 
        self.assertRaises(WeaveInvalidChecksum, w.get_text, b'v2')
633
 
        self.assertRaises(WeaveInvalidChecksum, w.get_lines, b'v2')
634
 
        self.assertRaises(WeaveInvalidChecksum, w.check)
 
711
        self.assertEqual('hello\n', w.get_text('v1'))
 
712
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
 
713
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
 
714
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)
635
715
 
636
716
        w = self.get_file_corrupted_checksum()
637
717
 
638
 
        self.assertEqual(b'hello\n', w.get_text(b'v1'))
639
 
        self.assertRaises(WeaveInvalidChecksum, w.get_text, b'v2')
640
 
        self.assertRaises(WeaveInvalidChecksum, w.get_lines, b'v2')
641
 
        self.assertRaises(WeaveInvalidChecksum, w.check)
 
718
        self.assertEqual('hello\n', w.get_text('v1'))
 
719
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_text, 'v2')
 
720
        self.assertRaises(errors.WeaveInvalidChecksum, w.get_lines, 'v2')
 
721
        self.assertRaises(errors.WeaveInvalidChecksum, w.check)
642
722
 
643
723
    def get_file_corrupted_text(self):
644
724
        """Return a versioned file with corrupt text but valid metadata."""
665
745
 
666
746
        vf = self.get_file()
667
747
        # add a base to get included
668
 
        vf.add_lines(b'base', [], [b'base\n'])
 
748
        vf.add_lines('base', [], ['base\n'])
669
749
        # add a ancestor to be included on one side
670
 
        vf.add_lines(b'lancestor', [], [b'lancestor\n'])
 
750
        vf.add_lines('lancestor', [], ['lancestor\n'])
671
751
        # add a ancestor to be included on the other side
672
 
        vf.add_lines(b'rancestor', [b'base'], [b'rancestor\n'])
 
752
        vf.add_lines('rancestor', ['base'], ['rancestor\n'])
673
753
        # add a child of rancestor with no eofile-nl
674
 
        vf.add_lines(b'child', [b'rancestor'], [b'base\n', b'child\n'])
 
754
        vf.add_lines('child', ['rancestor'], ['base\n', 'child\n'])
675
755
        # add a child of lancestor and base to join the two roots
676
 
        vf.add_lines(b'otherchild',
677
 
                     [b'lancestor', b'base'],
678
 
                     [b'base\n', b'lancestor\n', b'otherchild\n'])
679
 
 
 
756
        vf.add_lines('otherchild',
 
757
                     ['lancestor', 'base'],
 
758
                     ['base\n', 'lancestor\n', 'otherchild\n'])
680
759
        def iter_with_versions(versions, expected):
681
760
            # now we need to see what lines are returned, and how often.
682
761
            lines = {}
683
762
            progress = InstrumentedProgress()
684
763
            # iterate over the lines
685
764
            for line in vf.iter_lines_added_or_present_in_versions(versions,
686
 
                                                                   pb=progress):
 
765
                pb=progress):
687
766
                lines.setdefault(line, 0)
688
767
                lines[line] += 1
689
 
            if [] != progress.updates:
 
768
            if []!= progress.updates:
690
769
                self.assertEqual(expected, progress.updates)
691
770
            return lines
692
 
        lines = iter_with_versions([b'child', b'otherchild'],
 
771
        lines = iter_with_versions(['child', 'otherchild'],
693
772
                                   [('Walking content', 0, 2),
694
773
                                    ('Walking content', 1, 2),
695
774
                                    ('Walking content', 2, 2)])
696
775
        # we must see child and otherchild
697
 
        self.assertTrue(lines[(b'child\n', b'child')] > 0)
698
 
        self.assertTrue(lines[(b'otherchild\n', b'otherchild')] > 0)
 
776
        self.assertTrue(lines[('child\n', 'child')] > 0)
 
777
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
699
778
        # we dont care if we got more than that.
700
779
 
701
780
        # test all lines
706
785
                                          ('Walking content', 4, 5),
707
786
                                          ('Walking content', 5, 5)])
708
787
        # all lines must be seen at least once
709
 
        self.assertTrue(lines[(b'base\n', b'base')] > 0)
710
 
        self.assertTrue(lines[(b'lancestor\n', b'lancestor')] > 0)
711
 
        self.assertTrue(lines[(b'rancestor\n', b'rancestor')] > 0)
712
 
        self.assertTrue(lines[(b'child\n', b'child')] > 0)
713
 
        self.assertTrue(lines[(b'otherchild\n', b'otherchild')] > 0)
 
788
        self.assertTrue(lines[('base\n', 'base')] > 0)
 
789
        self.assertTrue(lines[('lancestor\n', 'lancestor')] > 0)
 
790
        self.assertTrue(lines[('rancestor\n', 'rancestor')] > 0)
 
791
        self.assertTrue(lines[('child\n', 'child')] > 0)
 
792
        self.assertTrue(lines[('otherchild\n', 'otherchild')] > 0)
714
793
 
715
794
    def test_add_lines_with_ghosts(self):
716
795
        # some versioned file formats allow lines to be added with parent
723
802
        parent_id_unicode = u'b\xbfse'
724
803
        parent_id_utf8 = parent_id_unicode.encode('utf8')
725
804
        try:
726
 
            vf.add_lines_with_ghosts(b'notbxbfse', [parent_id_utf8], [])
 
805
            vf.add_lines_with_ghosts('notbxbfse', [parent_id_utf8], [])
727
806
        except NotImplementedError:
728
807
            # check the other ghost apis are also not implemented
729
 
            self.assertRaises(NotImplementedError,
730
 
                              vf.get_ancestry_with_ghosts, [b'foo'])
731
 
            self.assertRaises(NotImplementedError,
732
 
                              vf.get_parents_with_ghosts, b'foo')
 
808
            self.assertRaises(NotImplementedError, vf.get_ancestry_with_ghosts, ['foo'])
 
809
            self.assertRaises(NotImplementedError, vf.get_parents_with_ghosts, 'foo')
733
810
            return
734
811
        vf = self.reopen_file()
735
812
        # test key graph related apis: getncestry, _graph, get_parents
736
813
        # has_version
737
814
        # - these are ghost unaware and must not be reflect ghosts
738
 
        self.assertEqual([b'notbxbfse'], vf.get_ancestry(b'notbxbfse'))
 
815
        self.assertEqual(['notbxbfse'], vf.get_ancestry('notbxbfse'))
739
816
        self.assertFalse(vf.has_version(parent_id_utf8))
740
817
        # we have _with_ghost apis to give us ghost information.
741
 
        self.assertEqual([parent_id_utf8, b'notbxbfse'],
742
 
                         vf.get_ancestry_with_ghosts([b'notbxbfse']))
743
 
        self.assertEqual([parent_id_utf8],
744
 
                         vf.get_parents_with_ghosts(b'notbxbfse'))
 
818
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry_with_ghosts(['notbxbfse']))
 
819
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
745
820
        # if we add something that is a ghost of another, it should correct the
746
821
        # results of the prior apis
747
822
        vf.add_lines(parent_id_utf8, [], [])
748
 
        self.assertEqual([parent_id_utf8, b'notbxbfse'],
749
 
                         vf.get_ancestry([b'notbxbfse']))
750
 
        self.assertEqual({b'notbxbfse': (parent_id_utf8,)},
751
 
                         vf.get_parent_map([b'notbxbfse']))
 
823
        self.assertEqual([parent_id_utf8, 'notbxbfse'], vf.get_ancestry(['notbxbfse']))
 
824
        self.assertEqual({'notbxbfse':(parent_id_utf8,)},
 
825
            vf.get_parent_map(['notbxbfse']))
752
826
        self.assertTrue(vf.has_version(parent_id_utf8))
753
827
        # we have _with_ghost apis to give us ghost information.
754
 
        self.assertEqual([parent_id_utf8, b'notbxbfse'],
755
 
                         vf.get_ancestry_with_ghosts([b'notbxbfse']))
756
 
        self.assertEqual([parent_id_utf8],
757
 
                         vf.get_parents_with_ghosts(b'notbxbfse'))
 
828
        self.assertEqual([parent_id_utf8, 'notbxbfse'],
 
829
            vf.get_ancestry_with_ghosts(['notbxbfse']))
 
830
        self.assertEqual([parent_id_utf8], vf.get_parents_with_ghosts('notbxbfse'))
758
831
 
759
832
    def test_add_lines_with_ghosts_after_normal_revs(self):
760
833
        # some versioned file formats allow lines to be added with parent
764
837
        vf = self.get_file()
765
838
        # probe for ghost support
766
839
        try:
767
 
            vf.add_lines_with_ghosts(b'base', [], [b'line\n', b'line_b\n'])
 
840
            vf.add_lines_with_ghosts('base', [], ['line\n', 'line_b\n'])
768
841
        except NotImplementedError:
769
842
            return
770
 
        vf.add_lines_with_ghosts(b'references_ghost',
771
 
                                 [b'base', b'a_ghost'],
772
 
                                 [b'line\n', b'line_b\n', b'line_c\n'])
773
 
        origins = vf.annotate(b'references_ghost')
774
 
        self.assertEqual((b'base', b'line\n'), origins[0])
775
 
        self.assertEqual((b'base', b'line_b\n'), origins[1])
776
 
        self.assertEqual((b'references_ghost', b'line_c\n'), origins[2])
 
843
        vf.add_lines_with_ghosts('references_ghost',
 
844
                                 ['base', 'a_ghost'],
 
845
                                 ['line\n', 'line_b\n', 'line_c\n'])
 
846
        origins = vf.annotate('references_ghost')
 
847
        self.assertEquals(('base', 'line\n'), origins[0])
 
848
        self.assertEquals(('base', 'line_b\n'), origins[1])
 
849
        self.assertEquals(('references_ghost', 'line_c\n'), origins[2])
777
850
 
778
851
    def test_readonly_mode(self):
779
 
        t = self.get_transport()
 
852
        transport = get_transport(self.get_url('.'))
780
853
        factory = self.get_factory()
781
 
        vf = factory('id', t, 0o777, create=True, access_mode='w')
782
 
        vf = factory('id', t, access_mode='r')
783
 
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, b'base', [], [])
 
854
        vf = factory('id', transport, 0777, create=True, access_mode='w')
 
855
        vf = factory('id', transport, access_mode='r')
 
856
        self.assertRaises(errors.ReadOnlyError, vf.add_lines, 'base', [], [])
784
857
        self.assertRaises(errors.ReadOnlyError,
785
858
                          vf.add_lines_with_ghosts,
786
 
                          b'base',
 
859
                          'base',
787
860
                          [],
788
861
                          [])
789
862
 
791
864
        # check the sha1 data is available
792
865
        vf = self.get_file()
793
866
        # a simple file
794
 
        vf.add_lines(b'a', [], [b'a\n'])
 
867
        vf.add_lines('a', [], ['a\n'])
795
868
        # the same file, different metadata
796
 
        vf.add_lines(b'b', [b'a'], [b'a\n'])
 
869
        vf.add_lines('b', ['a'], ['a\n'])
797
870
        # a file differing only in last newline.
798
 
        vf.add_lines(b'c', [], [b'a'])
 
871
        vf.add_lines('c', [], ['a'])
799
872
        self.assertEqual({
800
 
            b'a': b'3f786850e387550fdab836ed7e6dc881de23001b',
801
 
            b'c': b'86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
802
 
            b'b': b'3f786850e387550fdab836ed7e6dc881de23001b',
 
873
            'a': '3f786850e387550fdab836ed7e6dc881de23001b',
 
874
            'c': '86f7e437faa5a7fce15d1ddcb9eaeaea377667b8',
 
875
            'b': '3f786850e387550fdab836ed7e6dc881de23001b',
803
876
            },
804
 
            vf.get_sha1s([b'a', b'c', b'b']))
 
877
            vf.get_sha1s(['a', 'c', 'b']))
805
878
 
806
879
 
807
880
class TestWeave(TestCaseWithMemoryTransport, VersionedFileTestMixIn):
808
881
 
809
882
    def get_file(self, name='foo'):
810
 
        return WeaveFile(name, self.get_transport(),
811
 
                         create=True,
812
 
                         get_scope=self.get_transaction)
 
883
        return WeaveFile(name, get_transport(self.get_url('.')), create=True,
 
884
            get_scope=self.get_transaction)
813
885
 
814
886
    def get_file_corrupted_text(self):
815
 
        w = WeaveFile('foo', self.get_transport(),
816
 
                      create=True,
817
 
                      get_scope=self.get_transaction)
818
 
        w.add_lines(b'v1', [], [b'hello\n'])
819
 
        w.add_lines(b'v2', [b'v1'], [b'hello\n', b'there\n'])
 
887
        w = WeaveFile('foo', get_transport(self.get_url('.')), create=True,
 
888
            get_scope=self.get_transaction)
 
889
        w.add_lines('v1', [], ['hello\n'])
 
890
        w.add_lines('v2', ['v1'], ['hello\n', 'there\n'])
820
891
 
821
892
        # We are going to invasively corrupt the text
822
893
        # Make sure the internals of weave are the same
823
 
        self.assertEqual([(b'{', 0), b'hello\n', (b'}', None), (b'{', 1), b'there\n', (b'}', None)
824
 
                          ], w._weave)
 
894
        self.assertEqual([('{', 0)
 
895
                        , 'hello\n'
 
896
                        , ('}', None)
 
897
                        , ('{', 1)
 
898
                        , 'there\n'
 
899
                        , ('}', None)
 
900
                        ], w._weave)
825
901
 
826
 
        self.assertEqual([b'f572d396fae9206628714fb2ce00f72e94f2258f', b'90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
827
 
                          ], w._sha1s)
 
902
        self.assertEqual(['f572d396fae9206628714fb2ce00f72e94f2258f'
 
903
                        , '90f265c6e75f1c8f9ab76dcf85528352c5f215ef'
 
904
                        ], w._sha1s)
828
905
        w.check()
829
906
 
830
907
        # Corrupted
831
 
        w._weave[4] = b'There\n'
 
908
        w._weave[4] = 'There\n'
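        # Changing the case of 'there' alters the stored text without
        # touching the recorded sha1, so checksum verification should now
        # flag version 'v2'.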
832
909
        return w
833
910
 
834
911
    def get_file_corrupted_checksum(self):
835
912
        w = self.get_file_corrupted_text()
836
913
        # Corrected
837
 
        w._weave[4] = b'there\n'
838
 
        self.assertEqual(b'hello\nthere\n', w.get_text(b'v2'))
 
914
        w._weave[4] = 'there\n'
 
915
        self.assertEqual('hello\nthere\n', w.get_text('v2'))
839
916
 
840
 
        # Invalid checksum, first digit changed
841
 
        w._sha1s[1] = b'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
 
917
        #Invalid checksum, first digit changed
 
918
        w._sha1s[1] =  'f0f265c6e75f1c8f9ab76dcf85528352c5f215ef'
842
919
        return w
843
920
 
844
921
    def reopen_file(self, name='foo', create=False):
845
 
        return WeaveFile(name, self.get_transport(),
846
 
                         create=create,
847
 
                         get_scope=self.get_transaction)
 
922
        return WeaveFile(name, get_transport(self.get_url('.')), create=create,
 
923
            get_scope=self.get_transaction)
848
924
 
849
925
    def test_no_implicit_create(self):
850
926
        self.assertRaises(errors.NoSuchFile,
851
927
                          WeaveFile,
852
928
                          'foo',
853
 
                          self.get_transport(),
 
929
                          get_transport(self.get_url('.')),
854
930
                          get_scope=self.get_transaction)
855
931
 
856
932
    def get_factory(self):
860
936
class TestPlanMergeVersionedFile(TestCaseWithMemoryTransport):
861
937
 
862
938
    def setUp(self):
863
 
        super(TestPlanMergeVersionedFile, self).setUp()
 
939
        TestCaseWithMemoryTransport.setUp(self)
864
940
        mapper = PrefixMapper()
865
941
        factory = make_file_factory(True, mapper)
866
942
        self.vf1 = factory(self.get_transport('root-1'))
869
945
        self.plan_merge_vf.fallback_versionedfiles.extend([self.vf1, self.vf2])
870
946
 
871
947
    def test_add_lines(self):
872
 
        self.plan_merge_vf.add_lines((b'root', b'a:'), [], [])
873
 
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
874
 
                          (b'root', b'a'), [], [])
875
 
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
876
 
                          (b'root', b'a:'), None, [])
877
 
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
878
 
                          (b'root', b'a:'), [], None)
 
948
        self.plan_merge_vf.add_lines(('root', 'a:'), [], [])
 
949
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
 
950
            ('root', 'a'), [], [])
 
951
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
 
952
            ('root', 'a:'), None, [])
 
953
        self.assertRaises(ValueError, self.plan_merge_vf.add_lines,
 
954
            ('root', 'a:'), [], None)
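        # (_PlanMergeVersionedFile reserves revision ids ending in ':' for
        # its temporary merge texts, so a plain id, or missing parents or
        # lines, is rejected with ValueError as asserted above.)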
879
955
 
880
956
    def setup_abcde(self):
881
 
        self.vf1.add_lines((b'root', b'A'), [], [b'a'])
882
 
        self.vf1.add_lines((b'root', b'B'), [(b'root', b'A')], [b'b'])
883
 
        self.vf2.add_lines((b'root', b'C'), [], [b'c'])
884
 
        self.vf2.add_lines((b'root', b'D'), [(b'root', b'C')], [b'd'])
885
 
        self.plan_merge_vf.add_lines((b'root', b'E:'),
886
 
                                     [(b'root', b'B'), (b'root', b'D')], [b'e'])
 
957
        self.vf1.add_lines(('root', 'A'), [], ['a'])
 
958
        self.vf1.add_lines(('root', 'B'), [('root', 'A')], ['b'])
 
959
        self.vf2.add_lines(('root', 'C'), [], ['c'])
 
960
        self.vf2.add_lines(('root', 'D'), [('root', 'C')], ['d'])
 
961
        self.plan_merge_vf.add_lines(('root', 'E:'),
 
962
            [('root', 'B'), ('root', 'D')], ['e'])
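        # Resulting ancestry, split across the two fallback files:
        #   vf1: A -> B      vf2: C -> D      plan_merge_vf: E: <- (B, D)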
887
963
 
888
964
    def test_get_parents(self):
889
965
        self.setup_abcde()
890
 
        self.assertEqual({(b'root', b'B'): ((b'root', b'A'),)},
891
 
                         self.plan_merge_vf.get_parent_map([(b'root', b'B')]))
892
 
        self.assertEqual({(b'root', b'D'): ((b'root', b'C'),)},
893
 
                         self.plan_merge_vf.get_parent_map([(b'root', b'D')]))
894
 
        self.assertEqual({(b'root', b'E:'): ((b'root', b'B'), (b'root', b'D'))},
895
 
                         self.plan_merge_vf.get_parent_map([(b'root', b'E:')]))
 
966
        self.assertEqual({('root', 'B'):(('root', 'A'),)},
 
967
            self.plan_merge_vf.get_parent_map([('root', 'B')]))
 
968
        self.assertEqual({('root', 'D'):(('root', 'C'),)},
 
969
            self.plan_merge_vf.get_parent_map([('root', 'D')]))
 
970
        self.assertEqual({('root', 'E:'):(('root', 'B'),('root', 'D'))},
 
971
            self.plan_merge_vf.get_parent_map([('root', 'E:')]))
896
972
        self.assertEqual({},
897
 
                         self.plan_merge_vf.get_parent_map([(b'root', b'F')]))
 
973
            self.plan_merge_vf.get_parent_map([('root', 'F')]))
898
974
        self.assertEqual({
899
 
            (b'root', b'B'): ((b'root', b'A'),),
900
 
            (b'root', b'D'): ((b'root', b'C'),),
901
 
            (b'root', b'E:'): ((b'root', b'B'), (b'root', b'D')),
902
 
            },
 
975
                ('root', 'B'):(('root', 'A'),),
 
976
                ('root', 'D'):(('root', 'C'),),
 
977
                ('root', 'E:'):(('root', 'B'),('root', 'D')),
 
978
                },
903
979
            self.plan_merge_vf.get_parent_map(
904
 
                [(b'root', b'B'), (b'root', b'D'), (b'root', b'E:'), (b'root', b'F')]))
 
980
                [('root', 'B'), ('root', 'D'), ('root', 'E:'), ('root', 'F')]))
905
981
 
906
982
    def test_get_record_stream(self):
907
983
        self.setup_abcde()
908
 
 
909
984
        def get_record(suffix):
910
 
            return next(self.plan_merge_vf.get_record_stream(
911
 
                [(b'root', suffix)], 'unordered', True))
912
 
        self.assertEqual(b'a', get_record(b'A').get_bytes_as('fulltext'))
913
 
        self.assertEqual(b'a', b''.join(get_record(b'A').iter_bytes_as('chunked')))
914
 
        self.assertEqual(b'c', get_record(b'C').get_bytes_as('fulltext'))
915
 
        self.assertEqual(b'e', get_record(b'E:').get_bytes_as('fulltext'))
 
985
            return self.plan_merge_vf.get_record_stream(
 
986
                [('root', suffix)], 'unordered', True).next()
 
987
        self.assertEqual('a', get_record('A').get_bytes_as('fulltext'))
 
988
        self.assertEqual('c', get_record('C').get_bytes_as('fulltext'))
 
989
        self.assertEqual('e', get_record('E:').get_bytes_as('fulltext'))
916
990
        self.assertEqual('absent', get_record('F').storage_kind)
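        # Keys present in neither the plan-merge layer nor its fallbacks
        # come back as 'absent' placeholder records rather than raising.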
917
991
 
918
992
 
925
999
        # we should be able to read from http with a versioned file.
926
1000
        vf = self.get_file()
927
1001
        # first, try access to an empty file
928
 
        readonly_vf = self.get_factory()('foo',
929
 
                                         transport.get_transport_from_url(self.get_readonly_url('.')))
 
1002
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
930
1003
        self.assertEqual([], readonly_vf.versions())
931
 
 
932
 
    def test_readonly_http_works_with_feeling(self):
933
 
        # we should be able to read from http with a versioned file.
934
 
        vf = self.get_file()
935
1004
        # now with feeling.
936
 
        vf.add_lines(b'1', [], [b'a\n'])
937
 
        vf.add_lines(b'2', [b'1'], [b'b\n', b'a\n'])
938
 
        readonly_vf = self.get_factory()('foo',
939
 
                                         transport.get_transport_from_url(self.get_readonly_url('.')))
940
 
        self.assertEqual([b'1', b'2'], vf.versions())
941
 
        self.assertEqual([b'1', b'2'], readonly_vf.versions())
 
1005
        vf.add_lines('1', [], ['a\n'])
 
1006
        vf.add_lines('2', ['1'], ['b\n', 'a\n'])
 
1007
        readonly_vf = self.get_factory()('foo', get_transport(self.get_readonly_url('.')))
 
1008
        self.assertEqual(['1', '2'], vf.versions())
942
1009
        for version in readonly_vf.versions():
943
1010
            readonly_vf.get_lines(version)
944
1011
 
946
1013
class TestWeaveHTTP(TestCaseWithWebserver, TestReadonlyHttpMixin):
947
1014
 
948
1015
    def get_file(self):
949
 
        return WeaveFile('foo', self.get_transport(),
950
 
                         create=True,
951
 
                         get_scope=self.get_transaction)
 
1016
        return WeaveFile('foo', get_transport(self.get_url('.')), create=True,
 
1017
            get_scope=self.get_transaction)
952
1018
 
953
1019
    def get_factory(self):
954
1020
        return WeaveFile
957
1023
class MergeCasesMixin(object):
958
1024
 
959
1025
    def doMerge(self, base, a, b, mp):
 
1026
        from cStringIO import StringIO
960
1027
        from textwrap import dedent
961
1028
 
962
1029
        def addcrlf(x):
963
 
            return x + b'\n'
 
1030
            return x + '\n'
964
1031
 
965
1032
        w = self.get_file()
966
 
        w.add_lines(b'text0', [], list(map(addcrlf, base)))
967
 
        w.add_lines(b'text1', [b'text0'], list(map(addcrlf, a)))
968
 
        w.add_lines(b'text2', [b'text0'], list(map(addcrlf, b)))
 
1033
        w.add_lines('text0', [], map(addcrlf, base))
 
1034
        w.add_lines('text1', ['text0'], map(addcrlf, a))
 
1035
        w.add_lines('text2', ['text0'], map(addcrlf, b))
969
1036
 
970
1037
        self.log_contents(w)
971
1038
 
972
1039
        self.log('merge plan:')
973
 
        p = list(w.plan_merge(b'text1', b'text2'))
 
1040
        p = list(w.plan_merge('text1', 'text2'))
974
1041
        for state, line in p:
975
1042
            if line:
976
1043
                self.log('%12s | %s' % (state, line[:-1]))
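        # (Each plan entry is a (state, line) pair; states such as
        # 'unchanged', 'new-a', 'new-b', 'killed-a' and 'killed-b' tell
        # weave_merge which side contributed or removed each line.)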
977
1044
 
978
1045
        self.log('merge:')
979
 
        mt = BytesIO()
 
1046
        mt = StringIO()
980
1047
        mt.writelines(w.weave_merge(p))
981
1048
        mt.seek(0)
982
1049
        self.log(mt.getvalue())
983
1050
 
984
 
        mp = list(map(addcrlf, mp))
 
1051
        mp = map(addcrlf, mp)
985
1052
        self.assertEqual(mt.readlines(), mp)
986
1053
 
 
1054
 
987
1055
    def testOneInsert(self):
988
1056
        self.doMerge([],
989
 
                     [b'aa'],
 
1057
                     ['aa'],
990
1058
                     [],
991
 
                     [b'aa'])
 
1059
                     ['aa'])
992
1060
 
993
1061
    def testSeparateInserts(self):
994
 
        self.doMerge([b'aaa', b'bbb', b'ccc'],
995
 
                     [b'aaa', b'xxx', b'bbb', b'ccc'],
996
 
                     [b'aaa', b'bbb', b'yyy', b'ccc'],
997
 
                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'])
 
1062
        self.doMerge(['aaa', 'bbb', 'ccc'],
 
1063
                     ['aaa', 'xxx', 'bbb', 'ccc'],
 
1064
                     ['aaa', 'bbb', 'yyy', 'ccc'],
 
1065
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
998
1066
 
999
1067
    def testSameInsert(self):
1000
 
        self.doMerge([b'aaa', b'bbb', b'ccc'],
1001
 
                     [b'aaa', b'xxx', b'bbb', b'ccc'],
1002
 
                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'],
1003
 
                     [b'aaa', b'xxx', b'bbb', b'yyy', b'ccc'])
1004
 
    overlappedInsertExpected = [b'aaa', b'xxx', b'yyy', b'bbb']
1005
 
 
 
1068
        self.doMerge(['aaa', 'bbb', 'ccc'],
 
1069
                     ['aaa', 'xxx', 'bbb', 'ccc'],
 
1070
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'],
 
1071
                     ['aaa', 'xxx', 'bbb', 'yyy', 'ccc'])
 
1072
    overlappedInsertExpected = ['aaa', 'xxx', 'yyy', 'bbb']
1006
1073
    def testOverlappedInsert(self):
1007
 
        self.doMerge([b'aaa', b'bbb'],
1008
 
                     [b'aaa', b'xxx', b'yyy', b'bbb'],
1009
 
                     [b'aaa', b'xxx', b'bbb'], self.overlappedInsertExpected)
 
1074
        self.doMerge(['aaa', 'bbb'],
 
1075
                     ['aaa', 'xxx', 'yyy', 'bbb'],
 
1076
                     ['aaa', 'xxx', 'bbb'], self.overlappedInsertExpected)
1010
1077
 
1011
1078
        # really it ought to reduce this to
1012
 
        # [b'aaa', b'xxx', b'yyy', b'bbb']
 
1079
        # ['aaa', 'xxx', 'yyy', 'bbb']
 
1080
 
1013
1081
 
1014
1082
    def testClashReplace(self):
1015
 
        self.doMerge([b'aaa'],
1016
 
                     [b'xxx'],
1017
 
                     [b'yyy', b'zzz'],
1018
 
                     [b'<<<<<<< ', b'xxx', b'=======', b'yyy', b'zzz',
1019
 
                      b'>>>>>>> '])
 
1083
        self.doMerge(['aaa'],
 
1084
                     ['xxx'],
 
1085
                     ['yyy', 'zzz'],
 
1086
                     ['<<<<<<< ', 'xxx', '=======', 'yyy', 'zzz',
 
1087
                      '>>>>>>> '])
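        # Note the trailing space after each marker: with no file labels
        # supplied, weave_merge emits bare '<<<<<<< ' and '>>>>>>> ' lines.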
1020
1088
 
1021
1089
    def testNonClashInsert1(self):
1022
 
        self.doMerge([b'aaa'],
1023
 
                     [b'xxx', b'aaa'],
1024
 
                     [b'yyy', b'zzz'],
1025
 
                     [b'<<<<<<< ', b'xxx', b'aaa', b'=======', b'yyy', b'zzz',
1026
 
                      b'>>>>>>> '])
 
1090
        self.doMerge(['aaa'],
 
1091
                     ['xxx', 'aaa'],
 
1092
                     ['yyy', 'zzz'],
 
1093
                     ['<<<<<<< ', 'xxx', 'aaa', '=======', 'yyy', 'zzz',
 
1094
                      '>>>>>>> '])
1027
1095
 
1028
1096
    def testNonClashInsert2(self):
1029
 
        self.doMerge([b'aaa'],
1030
 
                     [b'aaa'],
1031
 
                     [b'yyy', b'zzz'],
1032
 
                     [b'yyy', b'zzz'])
 
1097
        self.doMerge(['aaa'],
 
1098
                     ['aaa'],
 
1099
                     ['yyy', 'zzz'],
 
1100
                     ['yyy', 'zzz'])
 
1101
 
1033
1102
 
1034
1103
    def testDeleteAndModify(self):
1035
1104
        """Clashing delete and modification.
1042
1111
        # skipped, not working yet
1043
1112
        return
1044
1113
 
1045
 
        self.doMerge([b'aaa', b'bbb', b'ccc'],
1046
 
                     [b'aaa', b'ddd', b'ccc'],
1047
 
                     [b'aaa', b'ccc'],
1048
 
                     [b'<<<<<<<< ', b'aaa', b'=======', b'>>>>>>> ', b'ccc'])
 
1114
        self.doMerge(['aaa', 'bbb', 'ccc'],
 
1115
                     ['aaa', 'ddd', 'ccc'],
 
1116
                     ['aaa', 'ccc'],
 
1117
                     ['<<<<<<<< ', 'aaa', '=======', '>>>>>>> ', 'ccc'])
1049
1118
 
1050
1119
    def _test_merge_from_strings(self, base, a, b, expected):
1051
1120
        w = self.get_file()
1052
 
        w.add_lines(b'text0', [], base.splitlines(True))
1053
 
        w.add_lines(b'text1', [b'text0'], a.splitlines(True))
1054
 
        w.add_lines(b'text2', [b'text0'], b.splitlines(True))
 
1121
        w.add_lines('text0', [], base.splitlines(True))
 
1122
        w.add_lines('text1', ['text0'], a.splitlines(True))
 
1123
        w.add_lines('text2', ['text0'], b.splitlines(True))
1055
1124
        self.log('merge plan:')
1056
 
        p = list(w.plan_merge(b'text1', b'text2'))
 
1125
        p = list(w.plan_merge('text1', 'text2'))
1057
1126
        for state, line in p:
1058
1127
            if line:
1059
1128
                self.log('%12s | %s' % (state, line[:-1]))
1060
1129
        self.log('merge result:')
1061
 
        result_text = b''.join(w.weave_merge(p))
 
1130
        result_text = ''.join(w.weave_merge(p))
1062
1131
        self.log(result_text)
1063
1132
        self.assertEqualDiff(result_text, expected)
1064
1133
 
1065
1134
    def test_weave_merge_conflicts(self):
1066
1135
        # does weave merge properly handle plans that end with unchanged?
1067
 
        result = b''.join(self.get_file().weave_merge([('new-a', b'hello\n')]))
1068
 
        self.assertEqual(result, b'hello\n')
 
1136
        result = ''.join(self.get_file().weave_merge([('new-a', 'hello\n')]))
 
1137
        self.assertEqual(result, 'hello\n')
1069
1138
 
1070
1139
    def test_deletion_extended(self):
1071
1140
        """One side deletes, the other deletes more.
1072
1141
        """
1073
 
        base = b"""\
 
1142
        base = """\
1074
1143
            line 1
1075
1144
            line 2
1076
1145
            line 3
1077
1146
            """
1078
 
        a = b"""\
 
1147
        a = """\
1079
1148
            line 1
1080
1149
            line 2
1081
1150
            """
1082
 
        b = b"""\
 
1151
        b = """\
1083
1152
            line 1
1084
1153
            """
1085
 
        result = b"""\
 
1154
        result = """\
1086
1155
            line 1
1087
1156
<<<<<<<\x20
1088
1157
            line 2
1097
1166
        Arguably it'd be better to treat these as agreement, rather than
1098
1167
        conflict, but for now conflict is safer.
1099
1168
        """
1100
 
        base = b"""\
 
1169
        base = """\
1101
1170
            start context
1102
1171
            int a() {}
1103
1172
            int b() {}
1104
1173
            int c() {}
1105
1174
            end context
1106
1175
            """
1107
 
        a = b"""\
 
1176
        a = """\
1108
1177
            start context
1109
1178
            int a() {}
1110
1179
            end context
1111
1180
            """
1112
 
        b = b"""\
 
1181
        b = """\
1113
1182
            start context
1114
1183
            int c() {}
1115
1184
            end context
1116
1185
            """
1117
 
        result = b"""\
 
1186
        result = """\
1118
1187
            start context
1119
1188
<<<<<<<\x20
1120
1189
            int a() {}
1127
1196
 
1128
1197
    def test_agreement_deletion(self):
1129
1198
        """Agree to delete some lines, without conflicts."""
1130
 
        base = b"""\
 
1199
        base = """\
1131
1200
            start context
1132
1201
            base line 1
1133
1202
            base line 2
1134
1203
            end context
1135
1204
            """
1136
 
        a = b"""\
1137
 
            start context
1138
 
            base line 1
1139
 
            end context
1140
 
            """
1141
 
        b = b"""\
1142
 
            start context
1143
 
            base line 1
1144
 
            end context
1145
 
            """
1146
 
        result = b"""\
 
1205
        a = """\
 
1206
            start context
 
1207
            base line 1
 
1208
            end context
 
1209
            """
 
1210
        b = """\
 
1211
            start context
 
1212
            base line 1
 
1213
            end context
 
1214
            """
 
1215
        result = """\
1147
1216
            start context
1148
1217
            base line 1
1149
1218
            end context
1160
1229
 
1161
1230
        It's better to consider the whole thing as a disagreement region.
1162
1231
        """
1163
 
        base = b"""\
 
1232
        base = """\
1164
1233
            start context
1165
1234
            base line 1
1166
1235
            base line 2
1167
1236
            end context
1168
1237
            """
1169
 
        a = b"""\
 
1238
        a = """\
1170
1239
            start context
1171
1240
            base line 1
1172
1241
            a's replacement line 2
1173
1242
            end context
1174
1243
            """
1175
 
        b = b"""\
 
1244
        b = """\
1176
1245
            start context
1177
1246
            b replaces
1178
1247
            both lines
1179
1248
            end context
1180
1249
            """
1181
 
        result = b"""\
 
1250
        result = """\
1182
1251
            start context
1183
1252
<<<<<<<\x20
1184
1253
            base line 1
1195
1264
class TestWeaveMerge(TestCaseWithMemoryTransport, MergeCasesMixin):
1196
1265
 
1197
1266
    def get_file(self, name='foo'):
1198
 
        return WeaveFile(name, self.get_transport(),
1199
 
                         create=True)
 
1267
        return WeaveFile(name, get_transport(self.get_url('.')), create=True)
1200
1268
 
1201
1269
    def log_contents(self, w):
1202
1270
        self.log('weave is:')
1203
 
        tmpf = BytesIO()
 
1271
        tmpf = StringIO()
1204
1272
        write_weave(w, tmpf)
1205
1273
        self.log(tmpf.getvalue())
1206
1274
 
1207
 
    overlappedInsertExpected = [b'aaa', b'<<<<<<< ', b'xxx', b'yyy', b'=======',
1208
 
                                b'xxx', b'>>>>>>> ', b'bbb']
 
1275
    overlappedInsertExpected = ['aaa', '<<<<<<< ', 'xxx', 'yyy', '=======',
 
1276
                                'xxx', '>>>>>>> ', 'bbb']
1209
1277
 
1210
1278
 
1211
1279
class TestContentFactoryAdaption(TestCaseWithMemoryTransport):
1216
1284
        # Each is source_kind, requested_kind, adapter class
1217
1285
        scenarios = [
1218
1286
            ('knit-delta-gz', 'fulltext', _mod_knit.DeltaPlainToFullText),
1219
 
            ('knit-delta-gz', 'lines', _mod_knit.DeltaPlainToFullText),
1220
 
            ('knit-delta-gz', 'chunked', _mod_knit.DeltaPlainToFullText),
1221
1287
            ('knit-ft-gz', 'fulltext', _mod_knit.FTPlainToFullText),
1222
 
            ('knit-ft-gz', 'lines', _mod_knit.FTPlainToFullText),
1223
 
            ('knit-ft-gz', 'chunked', _mod_knit.FTPlainToFullText),
1224
1288
            ('knit-annotated-delta-gz', 'knit-delta-gz',
1225
1289
                _mod_knit.DeltaAnnotatedToUnannotated),
1226
1290
            ('knit-annotated-delta-gz', 'fulltext',
1229
1293
                _mod_knit.FTAnnotatedToUnannotated),
1230
1294
            ('knit-annotated-ft-gz', 'fulltext',
1231
1295
                _mod_knit.FTAnnotatedToFullText),
1232
 
            ('knit-annotated-ft-gz', 'lines',
1233
 
                _mod_knit.FTAnnotatedToFullText),
1234
 
            ('knit-annotated-ft-gz', 'chunked',
1235
 
                _mod_knit.FTAnnotatedToFullText),
1236
1296
            ]
1237
1297
        for source, requested, klass in scenarios:
1238
1298
            adapter_factory = versionedfile.adapter_registry.get(
1245
1305
        transport = self.get_transport()
1246
1306
        return make_file_factory(annotated, mapper)(transport)
1247
1307
 
1248
 
    def helpGetBytes(self, f, ft_name, ft_adapter, delta_name, delta_adapter):
 
1308
    def helpGetBytes(self, f, ft_adapter, delta_adapter):
1249
1309
        """Grab the interested adapted texts for tests."""
1250
1310
        # origin is a fulltext
1251
 
        entries = f.get_record_stream([(b'origin',)], 'unordered', False)
1252
 
        base = next(entries)
1253
 
        ft_data = ft_adapter.get_bytes(base, ft_name)
 
1311
        entries = f.get_record_stream([('origin',)], 'unordered', False)
 
1312
        base = entries.next()
 
1313
        ft_data = ft_adapter.get_bytes(base)
1254
1314
        # merged is both a delta and multiple parents.
1255
 
        entries = f.get_record_stream([(b'merged',)], 'unordered', False)
1256
 
        merged = next(entries)
1257
 
        delta_data = delta_adapter.get_bytes(merged, delta_name)
 
1315
        entries = f.get_record_stream([('merged',)], 'unordered', False)
 
1316
        merged = entries.next()
 
1317
        delta_data = delta_adapter.get_bytes(merged)
1258
1318
        return ft_data, delta_data
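        # (Both adapters expose the same get_bytes interface; the full-text
        # adapters in these tests are built with no backing store (None),
        # while delta adapters may pull base texts from theirs.)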
1259
1319
 
1260
1320
    def test_deannotation_noeol(self):
1262
1322
        # we need a full text, and a delta
1263
1323
        f = self.get_knit()
1264
1324
        get_diamond_files(f, 1, trailing_eol=False)
1265
 
        ft_data, delta_data = self.helpGetBytes(
1266
 
            f, 'knit-ft-gz', _mod_knit.FTAnnotatedToUnannotated(None),
1267
 
            'knit-delta-gz', _mod_knit.DeltaAnnotatedToUnannotated(None))
1268
 
        self.assertEqual(
1269
 
            b'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
1270
 
            b'origin\n'
1271
 
            b'end origin\n',
1272
 
            GzipFile(mode='rb', fileobj=BytesIO(ft_data)).read())
1273
 
        self.assertEqual(
1274
 
            b'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
1275
 
            b'1,2,3\nleft\nright\nmerged\nend merged\n',
1276
 
            GzipFile(mode='rb', fileobj=BytesIO(delta_data)).read())
 
1325
        ft_data, delta_data = self.helpGetBytes(f,
 
1326
            _mod_knit.FTAnnotatedToUnannotated(None),
 
1327
            _mod_knit.DeltaAnnotatedToUnannotated(None))
 
1328
        self.assertEqual(
 
1329
            'version origin 1 b284f94827db1fa2970d9e2014f080413b547a7e\n'
 
1330
            'origin\n'
 
1331
            'end origin\n',
 
1332
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
 
1333
        self.assertEqual(
 
1334
            'version merged 4 32c2e79763b3f90e8ccde37f9710b6629c25a796\n'
 
1335
            '1,2,3\nleft\nright\nmerged\nend merged\n',
 
1336
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
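        # The decompressed payloads follow the plain knit record layout:
        # a 'version <id> <line-count> <sha1>' header, then either the
        # content lines or 'start,end,count' delta hunks, then 'end <id>'.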
1277
1337
 
1278
1338
    def test_deannotation(self):
1279
1339
        """Test converting annotated knits to unannotated knits."""
1280
1340
        # we need a full text, and a delta
1281
1341
        f = self.get_knit()
1282
1342
        get_diamond_files(f, 1)
1283
 
        ft_data, delta_data = self.helpGetBytes(
1284
 
            f, 'knit-ft-gz', _mod_knit.FTAnnotatedToUnannotated(None),
1285
 
            'knit-delta-gz', _mod_knit.DeltaAnnotatedToUnannotated(None))
1286
 
        self.assertEqual(
1287
 
            b'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
1288
 
            b'origin\n'
1289
 
            b'end origin\n',
1290
 
            GzipFile(mode='rb', fileobj=BytesIO(ft_data)).read())
1291
 
        self.assertEqual(
1292
 
            b'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
1293
 
            b'2,2,2\nright\nmerged\nend merged\n',
1294
 
            GzipFile(mode='rb', fileobj=BytesIO(delta_data)).read())
 
1343
        ft_data, delta_data = self.helpGetBytes(f,
 
1344
            _mod_knit.FTAnnotatedToUnannotated(None),
 
1345
            _mod_knit.DeltaAnnotatedToUnannotated(None))
 
1346
        self.assertEqual(
 
1347
            'version origin 1 00e364d235126be43292ab09cb4686cf703ddc17\n'
 
1348
            'origin\n'
 
1349
            'end origin\n',
 
1350
            GzipFile(mode='rb', fileobj=StringIO(ft_data)).read())
 
1351
        self.assertEqual(
 
1352
            'version merged 3 ed8bce375198ea62444dc71952b22cfc2b09226d\n'
 
1353
            '2,2,2\nright\nmerged\nend merged\n',
 
1354
            GzipFile(mode='rb', fileobj=StringIO(delta_data)).read())
1295
1355
 
1296
1356
    def test_annotated_to_fulltext_no_eol(self):
1297
1357
        """Test adapting annotated knits to full texts (for -> weaves)."""
1301
1361
        # Reconstructing a full text requires a backing versioned file, and it
1302
1362
        # must have the base lines requested from it.
1303
1363
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
1304
 
        ft_data, delta_data = self.helpGetBytes(
1305
 
            f, 'fulltext', _mod_knit.FTAnnotatedToFullText(None),
1306
 
            'fulltext', _mod_knit.DeltaAnnotatedToFullText(logged_vf))
1307
 
        self.assertEqual(b'origin', ft_data)
1308
 
        self.assertEqual(b'base\nleft\nright\nmerged', delta_data)
1309
 
        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
1310
 
                           True)], logged_vf.calls)
 
1364
        ft_data, delta_data = self.helpGetBytes(f,
 
1365
            _mod_knit.FTAnnotatedToFullText(None),
 
1366
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
 
1367
        self.assertEqual('origin', ft_data)
 
1368
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
 
1369
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
 
1370
            True)], logged_vf.calls)
1311
1371
 
1312
1372
    def test_annotated_to_fulltext(self):
1313
1373
        """Test adapting annotated knits to full texts (for -> weaves)."""
1317
1377
        # Reconstructing a full text requires a backing versioned file, and it
1318
1378
        # must have the base lines requested from it.
1319
1379
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
1320
 
        ft_data, delta_data = self.helpGetBytes(
1321
 
            f, 'fulltext', _mod_knit.FTAnnotatedToFullText(None),
1322
 
            'fulltext', _mod_knit.DeltaAnnotatedToFullText(logged_vf))
1323
 
        self.assertEqual(b'origin\n', ft_data)
1324
 
        self.assertEqual(b'base\nleft\nright\nmerged\n', delta_data)
1325
 
        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
1326
 
                           True)], logged_vf.calls)
 
1380
        ft_data, delta_data = self.helpGetBytes(f,
 
1381
            _mod_knit.FTAnnotatedToFullText(None),
 
1382
            _mod_knit.DeltaAnnotatedToFullText(logged_vf))
 
1383
        self.assertEqual('origin\n', ft_data)
 
1384
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
 
1385
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
 
1386
            True)], logged_vf.calls)
1327
1387
 
1328
1388
    def test_unannotated_to_fulltext(self):
1329
1389
        """Test adapting unannotated knits to full texts.
1336
1396
        # Reconstructing a full text requires a backing versioned file, and it
1337
1397
        # must have the base lines requested from it.
1338
1398
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
1339
 
        ft_data, delta_data = self.helpGetBytes(
1340
 
            f, 'fulltext', _mod_knit.FTPlainToFullText(None),
1341
 
            'fulltext', _mod_knit.DeltaPlainToFullText(logged_vf))
1342
 
        self.assertEqual(b'origin\n', ft_data)
1343
 
        self.assertEqual(b'base\nleft\nright\nmerged\n', delta_data)
1344
 
        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
1345
 
                           True)], logged_vf.calls)
 
1399
        ft_data, delta_data = self.helpGetBytes(f,
 
1400
            _mod_knit.FTPlainToFullText(None),
 
1401
            _mod_knit.DeltaPlainToFullText(logged_vf))
 
1402
        self.assertEqual('origin\n', ft_data)
 
1403
        self.assertEqual('base\nleft\nright\nmerged\n', delta_data)
 
1404
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
 
1405
            True)], logged_vf.calls)
1346
1406
 
1347
1407
    def test_unannotated_to_fulltext_no_eol(self):
1348
1408
        """Test adapting unannotated knits to full texts.
1355
1415
        # Reconstructing a full text requires a backing versioned file, and it
1356
1416
        # must have the base lines requested from it.
1357
1417
        logged_vf = versionedfile.RecordingVersionedFilesDecorator(f)
1358
 
        ft_data, delta_data = self.helpGetBytes(
1359
 
            f, 'fulltext', _mod_knit.FTPlainToFullText(None),
1360
 
            'fulltext', _mod_knit.DeltaPlainToFullText(logged_vf))
1361
 
        self.assertEqual(b'origin', ft_data)
1362
 
        self.assertEqual(b'base\nleft\nright\nmerged', delta_data)
1363
 
        self.assertEqual([('get_record_stream', [(b'left',)], 'unordered',
1364
 
                           True)], logged_vf.calls)
 
1418
        ft_data, delta_data = self.helpGetBytes(f,
 
1419
            _mod_knit.FTPlainToFullText(None),
 
1420
            _mod_knit.DeltaPlainToFullText(logged_vf))
 
1421
        self.assertEqual('origin', ft_data)
 
1422
        self.assertEqual('base\nleft\nright\nmerged', delta_data)
 
1423
        self.assertEqual([('get_record_stream', [('left',)], 'unordered',
 
1424
            True)], logged_vf.calls)
1365
1425
 
1366
1426
 
1367
1427
class TestKeyMapper(TestCaseWithMemoryTransport):
1369
1429
 
1370
1430
    def test_identity_mapper(self):
1371
1431
        mapper = versionedfile.ConstantMapper("inventory")
1372
 
        self.assertEqual("inventory", mapper.map((b'foo@ar',)))
1373
 
        self.assertEqual("inventory", mapper.map((b'quux',)))
 
1432
        self.assertEqual("inventory", mapper.map(('foo@ar',)))
 
1433
        self.assertEqual("inventory", mapper.map(('quux',)))
1374
1434
 
1375
1435
    def test_prefix_mapper(self):
1376
1436
        # format5: plain
1377
1437
        mapper = versionedfile.PrefixMapper()
1378
 
        self.assertEqual("file-id", mapper.map((b"file-id", b"revision-id")))
1379
 
        self.assertEqual("new-id", mapper.map((b"new-id", b"revision-id")))
1380
 
        self.assertEqual((b'file-id',), mapper.unmap("file-id"))
1381
 
        self.assertEqual((b'new-id',), mapper.unmap("new-id"))
 
1438
        self.assertEqual("file-id", mapper.map(("file-id", "revision-id")))
 
1439
        self.assertEqual("new-id", mapper.map(("new-id", "revision-id")))
 
1440
        self.assertEqual(('file-id',), mapper.unmap("file-id"))
 
1441
        self.assertEqual(('new-id',), mapper.unmap("new-id"))
1382
1442
 
1383
1443
    def test_hash_prefix_mapper(self):
1384
1444
        # format6: hash + plain
1385
1445
        mapper = versionedfile.HashPrefixMapper()
1386
 
        self.assertEqual(
1387
 
            "9b/file-id", mapper.map((b"file-id", b"revision-id")))
1388
 
        self.assertEqual("45/new-id", mapper.map((b"new-id", b"revision-id")))
1389
 
        self.assertEqual((b'file-id',), mapper.unmap("9b/file-id"))
1390
 
        self.assertEqual((b'new-id',), mapper.unmap("45/new-id"))
 
1446
        self.assertEqual("9b/file-id", mapper.map(("file-id", "revision-id")))
 
1447
        self.assertEqual("45/new-id", mapper.map(("new-id", "revision-id")))
 
1448
        self.assertEqual(('file-id',), mapper.unmap("9b/file-id"))
 
1449
        self.assertEqual(('new-id',), mapper.unmap("45/new-id"))
1391
1450
 
1392
1451
    def test_hash_escaped_mapper(self):
1393
1452
        # knit1: hash + escaped
1394
1453
        mapper = versionedfile.HashEscapedPrefixMapper()
1395
 
        self.assertEqual("88/%2520", mapper.map((b" ", b"revision-id")))
1396
 
        self.assertEqual("ed/fil%2545-%2549d", mapper.map((b"filE-Id",
1397
 
                                                           b"revision-id")))
1398
 
        self.assertEqual("88/ne%2557-%2549d", mapper.map((b"neW-Id",
1399
 
                                                          b"revision-id")))
1400
 
        self.assertEqual((b'filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
1401
 
        self.assertEqual((b'neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
 
1454
        self.assertEqual("88/%2520", mapper.map((" ", "revision-id")))
 
1455
        self.assertEqual("ed/fil%2545-%2549d", mapper.map(("filE-Id",
 
1456
            "revision-id")))
 
1457
        self.assertEqual("88/ne%2557-%2549d", mapper.map(("neW-Id",
 
1458
            "revision-id")))
 
1459
        self.assertEqual(('filE-Id',), mapper.unmap("ed/fil%2545-%2549d"))
 
1460
        self.assertEqual(('neW-Id',), mapper.unmap("88/ne%2557-%2549d"))
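        # (In each case the two-hex-digit bucket is a small hash of the
        # escaped file id, here the low byte of an adler32 checksum: 0x9b
        # for 'file-id', 0x88 for 'ne%57-%49d'; the escaped mapper also
        # %-quotes upper-case and unsafe characters, with '%' itself
        # re-escaped in the final name.)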
1402
1461
 
1403
1462
 
1404
1463
class TestVersionedFiles(TestCaseWithMemoryTransport):
1405
1464
    """Tests for the multiple-file variant of VersionedFile."""
1406
1465
 
1407
 
    # We want to be sure of behaviour for:
1408
 
    # weaves prefix layout (weave texts)
1409
 
    # individually named weaves (weave inventories)
1410
 
    # annotated knits - prefix|hash|hash-escape layout, we test the third only
1411
 
    #                   as it is the most complex mapper.
1412
 
    # individually named knits
1413
 
    # individual no-graph knits in packs (signatures)
1414
 
    # individual graph knits in packs (inventories)
1415
 
    # individual graph nocompression knits in packs (revisions)
1416
 
    # plain text knits in packs (texts)
1417
 
    len_one_scenarios = [
1418
 
        ('weave-named', {
1419
 
            'cleanup': None,
1420
 
            'factory': make_versioned_files_factory(WeaveFile,
1421
 
                                                    ConstantMapper('inventory')),
1422
 
            'graph': True,
1423
 
            'key_length': 1,
1424
 
            'support_partial_insertion': False,
1425
 
            }),
1426
 
        ('named-knit', {
1427
 
            'cleanup': None,
1428
 
            'factory': make_file_factory(False, ConstantMapper('revisions')),
1429
 
            'graph': True,
1430
 
            'key_length': 1,
1431
 
            'support_partial_insertion': False,
1432
 
            }),
1433
 
        ('named-nograph-nodelta-knit-pack', {
1434
 
            'cleanup': cleanup_pack_knit,
1435
 
            'factory': make_pack_factory(False, False, 1),
1436
 
            'graph': False,
1437
 
            'key_length': 1,
1438
 
            'support_partial_insertion': False,
1439
 
            }),
1440
 
        ('named-graph-knit-pack', {
1441
 
            'cleanup': cleanup_pack_knit,
1442
 
            'factory': make_pack_factory(True, True, 1),
1443
 
            'graph': True,
1444
 
            'key_length': 1,
1445
 
            'support_partial_insertion': True,
1446
 
            }),
1447
 
        ('named-graph-nodelta-knit-pack', {
1448
 
            'cleanup': cleanup_pack_knit,
1449
 
            'factory': make_pack_factory(True, False, 1),
1450
 
            'graph': True,
1451
 
            'key_length': 1,
1452
 
            'support_partial_insertion': False,
1453
 
            }),
1454
 
        ('groupcompress-nograph', {
1455
 
            'cleanup': groupcompress.cleanup_pack_group,
1456
 
            'factory': groupcompress.make_pack_factory(False, False, 1),
1457
 
            'graph': False,
1458
 
            'key_length': 1,
1459
 
            'support_partial_insertion': False,
1460
 
            }),
1461
 
        ]
1462
 
    len_two_scenarios = [
1463
 
        ('weave-prefix', {
1464
 
            'cleanup': None,
1465
 
            'factory': make_versioned_files_factory(WeaveFile,
1466
 
                                                    PrefixMapper()),
1467
 
            'graph': True,
1468
 
            'key_length': 2,
1469
 
            'support_partial_insertion': False,
1470
 
            }),
1471
 
        ('annotated-knit-escape', {
1472
 
            'cleanup': None,
1473
 
            'factory': make_file_factory(True, HashEscapedPrefixMapper()),
1474
 
            'graph': True,
1475
 
            'key_length': 2,
1476
 
            'support_partial_insertion': False,
1477
 
            }),
1478
 
        ('plain-knit-pack', {
1479
 
            'cleanup': cleanup_pack_knit,
1480
 
            'factory': make_pack_factory(True, True, 2),
1481
 
            'graph': True,
1482
 
            'key_length': 2,
1483
 
            'support_partial_insertion': True,
1484
 
            }),
1485
 
        ('groupcompress', {
1486
 
            'cleanup': groupcompress.cleanup_pack_group,
1487
 
            'factory': groupcompress.make_pack_factory(True, False, 1),
1488
 
            'graph': True,
1489
 
            'key_length': 1,
1490
 
            'support_partial_insertion': False,
1491
 
            }),
1492
 
        ]
1493
 
 
1494
 
    scenarios = len_one_scenarios + len_two_scenarios
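    # Each scenario dict is applied as attributes of a parameterized copy
    # of this class: 'factory' and 'cleanup' control construction of the
    # files under test, while the tests below branch on self.graph,
    # self.key_length and self.support_partial_insertion.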
1495
 
 
1496
1466
    def get_versionedfiles(self, relpath='files'):
1497
1467
        transport = self.get_transport(relpath)
1498
1468
        if relpath != '.':
1507
1477
        if self.key_length == 1:
1508
1478
            return (suffix,)
1509
1479
        else:
1510
 
            return (b'FileA',) + (suffix,)
1511
 
 
1512
 
    def test_add_fallback_implies_without_fallbacks(self):
1513
 
        f = self.get_versionedfiles('files')
1514
 
        if getattr(f, 'add_fallback_versioned_files', None) is None:
1515
 
            raise TestNotApplicable("%s doesn't support fallbacks"
1516
 
                                    % (f.__class__.__name__,))
1517
 
        g = self.get_versionedfiles('fallback')
1518
 
        key_a = self.get_simple_key(b'a')
1519
 
        g.add_lines(key_a, [], [b'\n'])
1520
 
        f.add_fallback_versioned_files(g)
1521
 
        self.assertTrue(key_a in f.get_parent_map([key_a]))
1522
 
        self.assertFalse(
1523
 
            key_a in f.without_fallbacks().get_parent_map([key_a]))
 
1480
            return ('FileA',) + (suffix,)
1524
1481
 
1525
1482
    def test_add_lines(self):
1526
1483
        f = self.get_versionedfiles()
1527
 
        key0 = self.get_simple_key(b'r0')
1528
 
        key1 = self.get_simple_key(b'r1')
1529
 
        key2 = self.get_simple_key(b'r2')
1530
 
        keyf = self.get_simple_key(b'foo')
1531
 
        f.add_lines(key0, [], [b'a\n', b'b\n'])
 
1484
        key0 = self.get_simple_key('r0')
 
1485
        key1 = self.get_simple_key('r1')
 
1486
        key2 = self.get_simple_key('r2')
 
1487
        keyf = self.get_simple_key('foo')
 
1488
        f.add_lines(key0, [], ['a\n', 'b\n'])
1532
1489
        if self.graph:
1533
 
            f.add_lines(key1, [key0], [b'b\n', b'c\n'])
 
1490
            f.add_lines(key1, [key0], ['b\n', 'c\n'])
1534
1491
        else:
1535
 
            f.add_lines(key1, [], [b'b\n', b'c\n'])
 
1492
            f.add_lines(key1, [], ['b\n', 'c\n'])
1536
1493
        keys = f.keys()
1537
1494
        self.assertTrue(key0 in keys)
1538
1495
        self.assertTrue(key1 in keys)
1540
1497
        for record in f.get_record_stream([key0, key1], 'unordered', True):
1541
1498
            records.append((record.key, record.get_bytes_as('fulltext')))
1542
1499
        records.sort()
1543
 
        self.assertEqual([(key0, b'a\nb\n'), (key1, b'b\nc\n')], records)
 
1500
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)
1544
1501
 
1545
 
    def test_add_chunks(self):
 
1502
    def test__add_text(self):
1546
1503
        f = self.get_versionedfiles()
1547
 
        key0 = self.get_simple_key(b'r0')
1548
 
        key1 = self.get_simple_key(b'r1')
1549
 
        key2 = self.get_simple_key(b'r2')
1550
 
        keyf = self.get_simple_key(b'foo')
1551
 
        def add_chunks(key, parents, chunks):
1552
 
            factory = ChunkedContentFactory(
1553
 
                key, parents, osutils.sha_strings(chunks), chunks)
1554
 
            return f.add_content(factory)
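        # (The chunks below deliberately split mid-line, e.g. b'a' then
        # b'\nb\n'; add_content must not care where chunk boundaries fall.)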
1555
 
 
1556
 
        add_chunks(key0, [], [b'a', b'\nb\n'])
 
1504
        key0 = self.get_simple_key('r0')
 
1505
        key1 = self.get_simple_key('r1')
 
1506
        key2 = self.get_simple_key('r2')
 
1507
        keyf = self.get_simple_key('foo')
 
1508
        f._add_text(key0, [], 'a\nb\n')
1557
1509
        if self.graph:
1558
 
            add_chunks(key1, [key0], [b'b', b'\n', b'c\n'])
 
1510
            f._add_text(key1, [key0], 'b\nc\n')
1559
1511
        else:
1560
 
            add_chunks(key1, [], [b'b\n', b'c\n'])
 
1512
            f._add_text(key1, [], 'b\nc\n')
1561
1513
        keys = f.keys()
1562
 
        self.assertIn(key0, keys)
1563
 
        self.assertIn(key1, keys)
 
1514
        self.assertTrue(key0 in keys)
 
1515
        self.assertTrue(key1 in keys)
1564
1516
        records = []
1565
1517
        for record in f.get_record_stream([key0, key1], 'unordered', True):
1566
1518
            records.append((record.key, record.get_bytes_as('fulltext')))
1567
1519
        records.sort()
1568
 
        self.assertEqual([(key0, b'a\nb\n'), (key1, b'b\nc\n')], records)
 
1520
        self.assertEqual([(key0, 'a\nb\n'), (key1, 'b\nc\n')], records)
1569
1521
 
1570
1522
    def test_annotate(self):
1571
1523
        files = self.get_versionedfiles()
1573
1525
        if self.key_length == 1:
1574
1526
            prefix = ()
1575
1527
        else:
1576
 
            prefix = (b'FileA',)
 
1528
            prefix = ('FileA',)
1577
1529
        # introduced full text
1578
 
        origins = files.annotate(prefix + (b'origin',))
 
1530
        origins = files.annotate(prefix + ('origin',))
1579
1531
        self.assertEqual([
1580
 
            (prefix + (b'origin',), b'origin\n')],
 
1532
            (prefix + ('origin',), 'origin\n')],
1581
1533
            origins)
1582
1534
        # a delta
1583
 
        origins = files.annotate(prefix + (b'base',))
 
1535
        origins = files.annotate(prefix + ('base',))
1584
1536
        self.assertEqual([
1585
 
            (prefix + (b'base',), b'base\n')],
 
1537
            (prefix + ('base',), 'base\n')],
1586
1538
            origins)
1587
1539
        # a merge
1588
 
        origins = files.annotate(prefix + (b'merged',))
 
1540
        origins = files.annotate(prefix + ('merged',))
1589
1541
        if self.graph:
1590
1542
            self.assertEqual([
1591
 
                (prefix + (b'base',), b'base\n'),
1592
 
                (prefix + (b'left',), b'left\n'),
1593
 
                (prefix + (b'right',), b'right\n'),
1594
 
                (prefix + (b'merged',), b'merged\n')
 
1543
                (prefix + ('base',), 'base\n'),
 
1544
                (prefix + ('left',), 'left\n'),
 
1545
                (prefix + ('right',), 'right\n'),
 
1546
                (prefix + ('merged',), 'merged\n')
1595
1547
                ],
1596
1548
                origins)
1597
1549
        else:
1598
1550
            # Without a graph everything is new.
1599
1551
            self.assertEqual([
1600
 
                (prefix + (b'merged',), b'base\n'),
1601
 
                (prefix + (b'merged',), b'left\n'),
1602
 
                (prefix + (b'merged',), b'right\n'),
1603
 
                (prefix + (b'merged',), b'merged\n')
 
1552
                (prefix + ('merged',), 'base\n'),
 
1553
                (prefix + ('merged',), 'left\n'),
 
1554
                (prefix + ('merged',), 'right\n'),
 
1555
                (prefix + ('merged',), 'merged\n')
1604
1556
                ],
1605
1557
                origins)
1606
1558
        self.assertRaises(RevisionNotPresent,
1607
 
                          files.annotate, prefix + ('missing-key',))
 
1559
            files.annotate, prefix + ('missing-key',))
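        # With a graph, annotate credits each line to the revision that
        # introduced it; without one, every line is credited to the
        # requested text itself, and unknown keys raise RevisionNotPresent.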
1608
1560
 
1609
1561
    def test_check_no_parameters(self):
1610
1562
        files = self.get_versionedfiles()
1624
1576
        seen = set()
1625
1577
        # Texts output should be fulltexts.
1626
1578
        self.capture_stream(files, entries, seen.add,
1627
 
                            files.get_parent_map(keys), require_fulltext=True)
 
1579
            files.get_parent_map(keys), require_fulltext=True)
1628
1580
        # All texts should be output.
1629
1581
        self.assertEqual(set(keys), seen)
1630
1582
 
1637
1589
        files = self.get_versionedfiles()
1638
1590
 
1639
1591
    def get_diamond_files(self, files, trailing_eol=True, left_only=False,
1640
 
                          nokeys=False):
 
1592
        nokeys=False):
1641
1593
        return get_diamond_files(files, self.key_length,
1642
 
                                 trailing_eol=trailing_eol, nograph=not self.graph,
1643
 
                                 left_only=left_only, nokeys=nokeys)
 
1594
            trailing_eol=trailing_eol, nograph=not self.graph,
 
1595
            left_only=left_only, nokeys=nokeys)
1644
1596
 
1645
1597
    def _add_content_nostoresha(self, add_lines):
1646
1598
        """When nostore_sha is supplied using old content raises."""
1647
1599
        vf = self.get_versionedfiles()
1648
 
        empty_text = (b'a', [])
1649
 
        sample_text_nl = (b'b', [b"foo\n", b"bar\n"])
1650
 
        sample_text_no_nl = (b'c', [b"foo\n", b"bar"])
 
1600
        empty_text = ('a', [])
 
1601
        sample_text_nl = ('b', ["foo\n", "bar\n"])
 
1602
        sample_text_no_nl = ('c', ["foo\n", "bar"])
1651
1603
        shas = []
1652
1604
        for version, lines in (empty_text, sample_text_nl, sample_text_no_nl):
1653
1605
            if add_lines:
1654
1606
                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
1655
1607
                                         lines)
1656
1608
            else:
1657
 
                sha, _, _ = vf.add_lines(self.get_simple_key(version), [],
1658
 
                                         lines)
 
1609
                sha, _, _ = vf._add_text(self.get_simple_key(version), [],
 
1610
                                         ''.join(lines))
1659
1611
            shas.append(sha)
1660
1612
        # we now have a copy of all the lines in the vf.
1661
1613
        for sha, (version, lines) in zip(
1662
 
                shas, (empty_text, sample_text_nl, sample_text_no_nl)):
1663
 
            new_key = self.get_simple_key(version + b"2")
1664
 
            self.assertRaises(errors.ExistingContent,
1665
 
                              vf.add_lines, new_key, [], lines,
1666
 
                              nostore_sha=sha)
1667
 
            self.assertRaises(errors.ExistingContent,
1668
 
                              vf.add_lines, new_key, [], lines,
1669
 
                              nostore_sha=sha)
 
1614
            shas, (empty_text, sample_text_nl, sample_text_no_nl)):
 
1615
            new_key = self.get_simple_key(version + "2")
 
1616
            self.assertRaises(errors.ExistingContent,
 
1617
                vf.add_lines, new_key, [], lines,
 
1618
                nostore_sha=sha)
 
1619
            self.assertRaises(errors.ExistingContent,
 
1620
                vf._add_text, new_key, [], ''.join(lines),
 
1621
                nostore_sha=sha)
1670
1622
            # and no new version should have been added.
1671
 
            record = next(vf.get_record_stream([new_key], 'unordered', True))
 
1623
            record = vf.get_record_stream([new_key], 'unordered', True).next()
1672
1624
            self.assertEqual('absent', record.storage_kind)
1673
1625
 
1674
1626
    def test_add_lines_nostoresha(self):
1675
1627
        self._add_content_nostoresha(add_lines=True)
1676
1628
 
 
1629
    def test__add_text_nostoresha(self):
 
1630
        self._add_content_nostoresha(add_lines=False)
 
1631
 
1677
1632
    def test_add_lines_return(self):
1678
1633
        files = self.get_versionedfiles()
1679
1634
        # save code by using the stock data insertion helper.
1685
1640
            results.append(add[:2])
1686
1641
        if self.key_length == 1:
1687
1642
            self.assertEqual([
1688
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1689
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1690
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1691
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1692
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1643
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1644
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1645
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1646
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1647
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1693
1648
                results)
1694
1649
        elif self.key_length == 2:
1695
1650
            self.assertEqual([
1696
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1697
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1698
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1699
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1700
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1701
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1702
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1703
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1704
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
1705
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1651
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1652
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1653
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1654
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1655
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1656
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1657
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1658
                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
 
1659
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
 
1660
                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
1706
1661
                results)
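        # (Recall results holds add[:2], i.e. (sha1, length-in-bytes) from
        # each add_lines call; with key_length 2 the diamond is inserted
        # once per file prefix, hence every pair appears twice.)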
1707
1662
 
1708
1663
    def test_add_lines_no_key_generates_chk_key(self):
1716
1671
            results.append(add[:2])
1717
1672
        if self.key_length == 1:
1718
1673
            self.assertEqual([
1719
 
                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
1720
 
                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
1721
 
                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
1722
 
                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
1723
 
                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
 
1674
                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
 
1675
                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
 
1676
                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
 
1677
+                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                 results)
             # Check the added items got CHK keys.
-            self.assertEqual({
-                (b'sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
-                (b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
-                (b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
-                (b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
-                (b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
-                },
+            self.assertEqual(set([
+                ('sha1:00e364d235126be43292ab09cb4686cf703ddc17',),
+                ('sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',),
+                ('sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',),
+                ('sha1:a8478686da38e370e32e42e8a0c220e33ee9132f',),
+                ('sha1:ed8bce375198ea62444dc71952b22cfc2b09226d',),
+                ]),
                 files.keys())
         elif self.key_length == 2:
             self.assertEqual([
-                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
-                (b'00e364d235126be43292ab09cb4686cf703ddc17', 7),
-                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
-                (b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
-                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
-                (b'a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
-                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
-                (b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
-                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
-                (b'ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
+                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
+                ('00e364d235126be43292ab09cb4686cf703ddc17', 7),
+                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+                ('51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44', 5),
+                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+                ('a8478686da38e370e32e42e8a0c220e33ee9132f', 10),
+                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+                ('9ef09dfa9d86780bdec9219a22560c6ece8e0ef1', 11),
+                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23),
+                ('ed8bce375198ea62444dc71952b22cfc2b09226d', 23)],
                 results)
             # Check the added items got CHK keys.
-            self.assertEqual({
-                (b'FileA', b'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
-                (b'FileA', b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
-                (b'FileA', b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
-                (b'FileA', b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
-                (b'FileA', b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
-                (b'FileB', b'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
-                (b'FileB', b'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
-                (b'FileB', b'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
-                (b'FileB', b'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
-                (b'FileB', b'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
-                },
+            self.assertEqual(set([
+                ('FileA', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
+                ('FileA', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
+                ('FileA', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
+                ('FileA', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
+                ('FileA', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
+                ('FileB', 'sha1:00e364d235126be43292ab09cb4686cf703ddc17'),
+                ('FileB', 'sha1:51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44'),
+                ('FileB', 'sha1:9ef09dfa9d86780bdec9219a22560c6ece8e0ef1'),
+                ('FileB', 'sha1:a8478686da38e370e32e42e8a0c220e33ee9132f'),
+                ('FileB', 'sha1:ed8bce375198ea62444dc71952b22cfc2b09226d'),
+                ]),
                 files.keys())
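Everything in the two hunks above is one mechanical substitution: the breezy side spells keys as bytes (b'...') and builds the expected set with a set literal, while the bzrlib side uses native strings and set([...]). A minimal, self-contained sketch of the set-building equivalence (plain Python, no bzrlib API involved; set literals only exist on Python 2.7 and later, which is why the older side must use the constructor form):

    # {...} and set([...]) produce equal sets; older Python 2 releases
    # lacked set-literal syntax, hence the set([...]) spelling.
    expected_literal = {('sha1:00e364d235126be43292ab09cb4686cf703ddc17',)}
    expected_ctor = set([('sha1:00e364d235126be43292ab09cb4686cf703ddc17',)])
    assert expected_literal == expected_ctor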

     def test_empty_lines(self):
         """Empty files can be stored."""
         f = self.get_versionedfiles()
-        key_a = self.get_simple_key(b'a')
+        key_a = self.get_simple_key('a')
         f.add_lines(key_a, [], [])
-        self.assertEqual(b'',
-                         next(f.get_record_stream([key_a], 'unordered', True
-                                                  )).get_bytes_as('fulltext'))
-        key_b = self.get_simple_key(b'b')
+        self.assertEqual('',
+            f.get_record_stream([key_a], 'unordered', True
+                ).next().get_bytes_as('fulltext'))
+        key_b = self.get_simple_key('b')
         f.add_lines(key_b, self.get_parents([key_a]), [])
-        self.assertEqual(b'',
-                         next(f.get_record_stream([key_b], 'unordered', True
-                                                  )).get_bytes_as('fulltext'))
+        self.assertEqual('',
+            f.get_record_stream([key_b], 'unordered', True
+                ).next().get_bytes_as('fulltext'))
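Besides the bytes literals, the only substantive change in test_empty_lines (and in test_newline_only below) is the iterator idiom: Python 3 dropped the .next() method in favour of the next() builtin. A standalone sketch of the two spellings:

    records = iter(['', '\n'])
    first = next(records)       # next() builtin: Python 2.6+ and 3.x
    # records.next()            # method form: Python 2 only; gone in 3.x
    second = next(records)
    assert (first, second) == ('', '\n')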

     def test_newline_only(self):
         f = self.get_versionedfiles()
-        key_a = self.get_simple_key(b'a')
-        f.add_lines(key_a, [], [b'\n'])
-        self.assertEqual(b'\n',
-                         next(f.get_record_stream([key_a], 'unordered', True
-                                                  )).get_bytes_as('fulltext'))
-        key_b = self.get_simple_key(b'b')
-        f.add_lines(key_b, self.get_parents([key_a]), [b'\n'])
-        self.assertEqual(b'\n',
-                         next(f.get_record_stream([key_b], 'unordered', True
-                                                  )).get_bytes_as('fulltext'))
+        key_a = self.get_simple_key('a')
+        f.add_lines(key_a, [], ['\n'])
+        self.assertEqual('\n',
+            f.get_record_stream([key_a], 'unordered', True
+                ).next().get_bytes_as('fulltext'))
+        key_b = self.get_simple_key('b')
+        f.add_lines(key_b, self.get_parents([key_a]), ['\n'])
+        self.assertEqual('\n',
+            f.get_record_stream([key_b], 'unordered', True
+                ).next().get_bytes_as('fulltext'))

     def test_get_known_graph_ancestry(self):
         f = self.get_versionedfiles()
         if not self.graph:
             raise TestNotApplicable('ancestry info only relevant with graph.')
-        key_a = self.get_simple_key(b'a')
-        key_b = self.get_simple_key(b'b')
-        key_c = self.get_simple_key(b'c')
+        key_a = self.get_simple_key('a')
+        key_b = self.get_simple_key('b')
+        key_c = self.get_simple_key('c')
         # A
         # |\
         # | B
         # |/
         # C
-        f.add_lines(key_a, [], [b'\n'])
-        f.add_lines(key_b, [key_a], [b'\n'])
-        f.add_lines(key_c, [key_a, key_b], [b'\n'])
+        f.add_lines(key_a, [], ['\n'])
+        f.add_lines(key_b, [key_a], ['\n'])
+        f.add_lines(key_c, [key_a, key_b], ['\n'])
         kg = f.get_known_graph_ancestry([key_c])
         self.assertIsInstance(kg, _mod_graph.KnownGraph)
         self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
@@ -1812 +1767 @@
         if getattr(f, 'add_fallback_versioned_files', None) is None:
             raise TestNotApplicable("%s doesn't support fallbacks"
                                     % (f.__class__.__name__,))
-        key_a = self.get_simple_key(b'a')
-        key_b = self.get_simple_key(b'b')
-        key_c = self.get_simple_key(b'c')
+        key_a = self.get_simple_key('a')
+        key_b = self.get_simple_key('b')
+        key_c = self.get_simple_key('c')
         # A     only in fallback
         # |\
         # | B
         # |/
         # C
         g = self.get_versionedfiles('fallback')
-        g.add_lines(key_a, [], [b'\n'])
+        g.add_lines(key_a, [], ['\n'])
         f.add_fallback_versioned_files(g)
-        f.add_lines(key_b, [key_a], [b'\n'])
-        f.add_lines(key_c, [key_a, key_b], [b'\n'])
+        f.add_lines(key_b, [key_a], ['\n'])
+        f.add_lines(key_c, [key_a, key_b], ['\n'])
         kg = f.get_known_graph_ancestry([key_c])
         self.assertEqual([key_a, key_b, key_c], list(kg.topo_sort()))
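For orientation, the property both ancestry tests assert is simply a topological order over the diamond parent map. A dependency-free sketch of that ordering (an illustration only, not the bzrlib KnownGraph implementation; it assumes an acyclic parent map):

    parent_map = {('a',): (), ('b',): (('a',),), ('c',): (('a',), ('b',))}

    def topo_sort(parent_map):
        # Emit a key once all of its parents have been emitted.
        order, done, pending = [], set(), sorted(parent_map)
        while pending:
            remaining = []
            for key in pending:
                if all(p in done for p in parent_map[key]):
                    order.append(key)
                    done.add(key)
                else:
                    remaining.append(key)
            pending = remaining
        return order

    assert topo_sort(parent_map) == [('a',), ('b',), ('c',)]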

@@ -1837 +1792 @@
     def assertValidStorageKind(self, storage_kind):
         """Assert that storage_kind is a valid storage_kind."""
         self.assertSubset([storage_kind],
-                          ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
-                           'knit-ft', 'knit-delta', 'chunked', 'fulltext',
-                           'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
-                           'knit-delta-gz',
-                           'knit-delta-closure', 'knit-delta-closure-ref',
-                           'groupcompress-block', 'groupcompress-block-ref'])
+            ['mpdiff', 'knit-annotated-ft', 'knit-annotated-delta',
+             'knit-ft', 'knit-delta', 'chunked', 'fulltext',
+             'knit-annotated-ft-gz', 'knit-annotated-delta-gz', 'knit-ft-gz',
+             'knit-delta-gz',
+             'knit-delta-closure', 'knit-delta-closure-ref',
+             'groupcompress-block', 'groupcompress-block-ref'])

     def capture_stream(self, f, entries, on_seen, parents,
-                       require_fulltext=False):
+        require_fulltext=False):
         """Capture a stream for testing."""
         for factory in entries:
             on_seen(factory.key)
             self.assertValidStorageKind(factory.storage_kind)
             if factory.sha1 is not None:
                 self.assertEqual(f.get_sha1s([factory.key])[factory.key],
-                                 factory.sha1)
+                    factory.sha1)
             self.assertEqual(parents[factory.key], factory.parents)
             self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
-                                  bytes)
+                str)
             if require_fulltext:
                 factory.get_bytes_as('fulltext')
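The bytes-versus-str assertion in capture_stream is the crux of the whole port: on Python 2, bytes is merely an alias for str, so the two sides of that hunk assert the same thing under 2.x but different things under 3.x. A quick illustration:

    import sys

    if sys.version_info[0] == 2:
        assert bytes is str            # 'x' is simultaneously str and bytes
        assert isinstance('x', bytes)
    else:
        assert bytes is not str        # text and binary data are distinct
        assert isinstance(b'x', bytes)
        assert not isinstance('x', bytes)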

@@ -1873 +1828 @@
     def get_keys_and_sort_order(self):
         """Get diamond test keys list, and their sort ordering."""
         if self.key_length == 1:
-            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
-            sort_order = {(b'merged',): 2, (b'left',): 1,
-                          (b'right',): 1, (b'base',): 0}
+            keys = [('merged',), ('left',), ('right',), ('base',)]
+            sort_order = {('merged',):2, ('left',):1, ('right',):1, ('base',):0}
         else:
             keys = [
-                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
-                (b'FileA', b'base'),
-                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
-                (b'FileB', b'base'),
+                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
+                ('FileA', 'base'),
+                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
+                ('FileB', 'base'),
                 ]
             sort_order = {
-                (b'FileA', b'merged'): 2, (b'FileA', b'left'): 1, (b'FileA', b'right'): 1,
-                (b'FileA', b'base'): 0,
-                (b'FileB', b'merged'): 2, (b'FileB', b'left'): 1, (b'FileB', b'right'): 1,
-                (b'FileB', b'base'): 0,
+                ('FileA', 'merged'):2, ('FileA', 'left'):1, ('FileA', 'right'):1,
+                ('FileA', 'base'):0,
+                ('FileB', 'merged'):2, ('FileB', 'left'):1, ('FileB', 'right'):1,
+                ('FileB', 'base'):0,
                 }
         return keys, sort_order

     def get_keys_and_groupcompress_sort_order(self):
         """Get diamond test keys list, and their groupcompress sort ordering."""
         if self.key_length == 1:
-            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
-            sort_order = {(b'merged',): 0, (b'left',): 1,
-                          (b'right',): 1, (b'base',): 2}
+            keys = [('merged',), ('left',), ('right',), ('base',)]
+            sort_order = {('merged',):0, ('left',):1, ('right',):1, ('base',):2}
         else:
             keys = [
-                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
-                (b'FileA', b'base'),
-                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
-                (b'FileB', b'base'),
+                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
+                ('FileA', 'base'),
+                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
+                ('FileB', 'base'),
                 ]
             sort_order = {
-                (b'FileA', b'merged'): 0, (b'FileA', b'left'): 1, (b'FileA', b'right'): 1,
-                (b'FileA', b'base'): 2,
-                (b'FileB', b'merged'): 3, (b'FileB', b'left'): 4, (b'FileB', b'right'): 4,
-                (b'FileB', b'base'): 5,
+                ('FileA', 'merged'):0, ('FileA', 'left'):1, ('FileA', 'right'):1,
+                ('FileA', 'base'):2,
+                ('FileB', 'merged'):3, ('FileB', 'left'):4, ('FileB', 'right'):4,
+                ('FileB', 'base'):5,
                 }
         return keys, sort_order

@@ -1935 +1888 @@
             seen.append(factory.key)
             self.assertValidStorageKind(factory.storage_kind)
             self.assertSubset([factory.sha1],
-                              [None, files.get_sha1s([factory.key])[factory.key]])
+                [None, files.get_sha1s([factory.key])[factory.key]])
             self.assertEqual(parent_map[factory.key], factory.parents)
             # self.assertEqual(files.get_text(factory.key),
             ft_bytes = factory.get_bytes_as('fulltext')
-            self.assertIsInstance(ft_bytes, bytes)
+            self.assertIsInstance(ft_bytes, str)
             chunked_bytes = factory.get_bytes_as('chunked')
-            self.assertEqualDiff(ft_bytes, b''.join(chunked_bytes))
-            chunked_bytes = factory.iter_bytes_as('chunked')
-            self.assertEqualDiff(ft_bytes, b''.join(chunked_bytes))
+            self.assertEqualDiff(ft_bytes, ''.join(chunked_bytes))

         self.assertStreamOrder(sort_order, seen, keys)

@@ -1961 +1912 @@
     def assertStreamOrder(self, sort_order, seen, keys):
         self.assertEqual(len(set(seen)), len(keys))
         if self.key_length == 1:
-            lows = {(): 0}
+            lows = {():0}
         else:
-            lows = {(b'FileA',): 0, (b'FileB',): 0}
+            lows = {('FileA',):0, ('FileB',):0}
         if not self.graph:
             self.assertEqual(set(keys), set(seen))
         else:
             for key in seen:
                 sort_pos = sort_order[key]
                 self.assertTrue(sort_pos >= lows[key[:-1]],
-                                "Out of order in sorted stream: %r, %r" % (key, seen))
+                    "Out of order in sorted stream: %r, %r" % (key, seen))
                 lows[key[:-1]] = sort_pos

     def test_get_record_stream_unknown_storage_kind_raises(self):
@@ -1978 +1929 @@
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [(b'merged',), (b'left',), (b'right',), (b'base',)]
+            keys = [('merged',), ('left',), ('right',), ('base',)]
         else:
             keys = [
-                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
-                (b'FileA', b'base'),
-                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
-                (b'FileB', b'base'),
+                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
+                ('FileA', 'base'),
+                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
+                ('FileB', 'base'),
                 ]
         parent_map = files.get_parent_map(keys)
         entries = files.get_record_stream(keys, 'unordered', False)
@@ -2000 +1951 @@
             self.assertEqual(parent_map[factory.key], factory.parents)
             # currently no stream emits mpdiff
             self.assertRaises(errors.UnavailableRepresentation,
-                              factory.get_bytes_as, 'mpdiff')
+                factory.get_bytes_as, 'mpdiff')
             self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
-                                  bytes)
+                str)
         self.assertEqual(set(keys), seen)

     def test_get_record_stream_missing_records_are_absent(self):
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [(b'merged',), (b'left',), (b'right',),
-                    (b'absent',), (b'base',)]
+            keys = [('merged',), ('left',), ('right',), ('absent',), ('base',)]
         else:
             keys = [
-                (b'FileA', b'merged'), (b'FileA', b'left'), (b'FileA', b'right'),
-                (b'FileA', b'absent'), (b'FileA', b'base'),
-                (b'FileB', b'merged'), (b'FileB', b'left'), (b'FileB', b'right'),
-                (b'FileB', b'absent'), (b'FileB', b'base'),
-                (b'absent', b'absent'),
+                ('FileA', 'merged'), ('FileA', 'left'), ('FileA', 'right'),
+                ('FileA', 'absent'), ('FileA', 'base'),
+                ('FileB', 'merged'), ('FileB', 'left'), ('FileB', 'right'),
+                ('FileB', 'absent'), ('FileB', 'base'),
+                ('absent', 'absent'),
                 ]
         parent_map = files.get_parent_map(keys)
         entries = files.get_record_stream(keys, 'unordered', False)
@@ -2028 +1978 @@
     def assertRecordHasContent(self, record, bytes):
         """Assert that record has the bytes bytes."""
         self.assertEqual(bytes, record.get_bytes_as('fulltext'))
-        self.assertEqual(bytes, b''.join(record.get_bytes_as('chunked')))
+        self.assertEqual(bytes, ''.join(record.get_bytes_as('chunked')))

     def test_get_record_stream_native_formats_are_wire_ready_one_ft(self):
         files = self.get_versionedfiles()
-        key = self.get_simple_key(b'foo')
-        files.add_lines(key, (), [b'my text\n', b'content'])
+        key = self.get_simple_key('foo')
+        files.add_lines(key, (), ['my text\n', 'content'])
         stream = files.get_record_stream([key], 'unordered', False)
-        record = next(stream)
+        record = stream.next()
         if record.storage_kind in ('chunked', 'fulltext'):
             # chunked and fulltext representations are for direct use not wire
             # serialisation: check they are able to be used directly. To send
             # such records over the wire translation will be needed.
-            self.assertRecordHasContent(record, b"my text\ncontent")
+            self.assertRecordHasContent(record, "my text\ncontent")
         else:
             bytes = [record.get_bytes_as(record.storage_kind)]
             network_stream = versionedfile.NetworkRecordStream(bytes).read()
@@ -2049 +1999 @@
             for record in network_stream:
                 records.append(record)
                 self.assertEqual(source_record.storage_kind,
-                                 record.storage_kind)
+                    record.storage_kind)
                 self.assertEqual(source_record.parents, record.parents)
                 self.assertEqual(
                     source_record.get_bytes_as(source_record.storage_kind),
@@ -2062 +2012 @@
         :param records: A list to collect the seen records.
         :return: A generator of the records in stream.
         """
-        # We make assertions during copying to catch things early for easier
-        # debugging. This must use the iterating zip() from the future.
-        for record, ref_record in zip(stream, expected):
+        # We make assertions during copying to catch things early for
+        # easier debugging.
+        for record, ref_record in izip(stream, expected):
             records.append(record)
             self.assertEqual(ref_record.key, record.key)
             self.assertEqual(ref_record.storage_kind, record.storage_kind)
@@ -2072 +2022 @@
             yield record
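The rewritten comment above ("must use the iterating zip()") is about laziness: both streams are generators with side effects, so they have to be consumed in lockstep rather than materialised into lists, which is what itertools.izip gave Python 2 and what the builtin zip already does on Python 3. A sketch of the lockstep behaviour:

    import itertools
    import sys

    def numbered(label, n):
        for i in range(n):
            yield (label, i)

    if sys.version_info[0] == 2:
        pairwise = itertools.izip(numbered('a', 3), numbered('b', 3))
    else:
        pairwise = zip(numbered('a', 3), numbered('b', 3))   # lazy on 3.x
    # Nothing has been pulled from either generator yet; pairs are drawn
    # one at a time as the loop advances, so assertions can fire early.
    for left, right in pairwise:
        assert left[1] == right[1]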

     def stream_to_bytes_or_skip_counter(self, skipped_records, full_texts,
-                                        stream):
+        stream):
         """Convert a stream to a bytes iterator.

         :param skipped_records: A list with one element to increment when a
@@ -2093 +2043 @@
     def test_get_record_stream_native_formats_are_wire_ready_ft_delta(self):
         files = self.get_versionedfiles()
         target_files = self.get_versionedfiles('target')
-        key = self.get_simple_key(b'ft')
-        key_delta = self.get_simple_key(b'delta')
-        files.add_lines(key, (), [b'my text\n', b'content'])
+        key = self.get_simple_key('ft')
+        key_delta = self.get_simple_key('delta')
+        files.add_lines(key, (), ['my text\n', 'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, [
-                        b'different\n', b'content\n'])
+        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
         local = files.get_record_stream([key, key_delta], 'unordered', False)
         ref = files.get_record_stream([key, key_delta], 'unordered', False)
         skipped_records = [0]
         full_texts = {
-            key: b"my text\ncontent",
-            key_delta: b"different\ncontent\n",
+            key: "my text\ncontent",
+            key_delta: "different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
@@ -2128 +2077 @@
         # copy a delta over the wire
         files = self.get_versionedfiles()
         target_files = self.get_versionedfiles('target')
-        key = self.get_simple_key(b'ft')
-        key_delta = self.get_simple_key(b'delta')
-        files.add_lines(key, (), [b'my text\n', b'content'])
+        key = self.get_simple_key('ft')
+        key_delta = self.get_simple_key('delta')
+        files.add_lines(key, (), ['my text\n', 'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, [
-                        b'different\n', b'content\n'])
+        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
         # Copy the basis text across so we can reconstruct the delta during
         # insertion into target.
         target_files.insert_record_stream(files.get_record_stream([key],
-                                                                  'unordered', False))
+            'unordered', False))
         local = files.get_record_stream([key_delta], 'unordered', False)
         ref = files.get_record_stream([key_delta], 'unordered', False)
         skipped_records = [0]
         full_texts = {
-            key_delta: b"different\ncontent\n",
+            key_delta: "different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
@@ -2165 +2113 @@
     def test_get_record_stream_wire_ready_delta_closure_included(self):
         # copy a delta over the wire with the ability to get its full text.
         files = self.get_versionedfiles()
-        key = self.get_simple_key(b'ft')
-        key_delta = self.get_simple_key(b'delta')
-        files.add_lines(key, (), [b'my text\n', b'content'])
+        key = self.get_simple_key('ft')
+        key_delta = self.get_simple_key('delta')
+        files.add_lines(key, (), ['my text\n', 'content'])
         if self.graph:
             delta_parents = (key,)
         else:
             delta_parents = ()
-        files.add_lines(key_delta, delta_parents, [
-                        b'different\n', b'content\n'])
+        files.add_lines(key_delta, delta_parents, ['different\n', 'content\n'])
         local = files.get_record_stream([key_delta], 'unordered', True)
         ref = files.get_record_stream([key_delta], 'unordered', True)
         skipped_records = [0]
         full_texts = {
-            key_delta: b"different\ncontent\n",
+            key_delta: "different\ncontent\n",
             }
         byte_stream = self.stream_to_bytes_or_skip_counter(
             skipped_records, full_texts, local)
@@ -2198 +2145 @@
         seen = set()
         for factory in entries:
             seen.add(factory.key)
-            if factory.key[-1] == b'absent':
+            if factory.key[-1] == 'absent':
                 self.assertEqual('absent', factory.storage_kind)
                 self.assertEqual(None, factory.sha1)
                 self.assertEqual(None, factory.parents)
@@ -2209 +2156 @@
                     self.assertEqual(sha1, factory.sha1)
                 self.assertEqual(parents[factory.key], factory.parents)
                 self.assertIsInstance(factory.get_bytes_as(factory.storage_kind),
-                                      bytes)
+                    str)
         self.assertEqual(set(keys), seen)

     def test_filter_absent_records(self):
@@ -2223 +2170 @@
         # absent keys is still delivered).
         present_keys = list(keys)
         if self.key_length == 1:
-            keys.insert(2, (b'extra',))
+            keys.insert(2, ('extra',))
         else:
-            keys.insert(2, (b'extra', b'extra'))
+            keys.insert(2, ('extra', 'extra'))
         entries = files.get_record_stream(keys, 'unordered', False)
         seen = set()
         self.capture_stream(files, versionedfile.filter_absent(entries), seen.add,
-                            parent_map)
+            parent_map)
         self.assertEqual(set(present_keys), seen)

     def get_mapper(self):
@@ -2249 +2196 @@
     def test_get_annotator(self):
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
-        origin_key = self.get_simple_key(b'origin')
-        base_key = self.get_simple_key(b'base')
-        left_key = self.get_simple_key(b'left')
-        right_key = self.get_simple_key(b'right')
-        merged_key = self.get_simple_key(b'merged')
+        origin_key = self.get_simple_key('origin')
+        base_key = self.get_simple_key('base')
+        left_key = self.get_simple_key('left')
+        right_key = self.get_simple_key('right')
+        merged_key = self.get_simple_key('merged')
         # annotator = files.get_annotator()
         # introduced full text
         origins, lines = files.get_annotator().annotate(origin_key)
         self.assertEqual([(origin_key,)], origins)
-        self.assertEqual([b'origin\n'], lines)
+        self.assertEqual(['origin\n'], lines)
         # a delta
         origins, lines = files.get_annotator().annotate(base_key)
         self.assertEqual([(base_key,)], origins)
@@ -2280 +2227 @@
                 (merged_key,),
                 ], origins)
         self.assertRaises(RevisionNotPresent,
-                          files.get_annotator().annotate, self.get_simple_key(b'missing-key'))
+            files.get_annotator().annotate, self.get_simple_key('missing-key'))

     def test_get_parent_map(self):
         files = self.get_versionedfiles()
         if self.key_length == 1:
             parent_details = [
-                ((b'r0',), self.get_parents(())),
-                ((b'r1',), self.get_parents(((b'r0',),))),
-                ((b'r2',), self.get_parents(())),
-                ((b'r3',), self.get_parents(())),
-                ((b'm',), self.get_parents(((b'r0',), (b'r1',), (b'r2',), (b'r3',)))),
+                (('r0',), self.get_parents(())),
+                (('r1',), self.get_parents((('r0',),))),
+                (('r2',), self.get_parents(())),
+                (('r3',), self.get_parents(())),
+                (('m',), self.get_parents((('r0',),('r1',),('r2',),('r3',)))),
                 ]
         else:
             parent_details = [
-                ((b'FileA', b'r0'), self.get_parents(())),
-                ((b'FileA', b'r1'), self.get_parents(((b'FileA', b'r0'),))),
-                ((b'FileA', b'r2'), self.get_parents(())),
-                ((b'FileA', b'r3'), self.get_parents(())),
-                ((b'FileA', b'm'), self.get_parents(((b'FileA', b'r0'),
-                                                     (b'FileA', b'r1'), (b'FileA', b'r2'), (b'FileA', b'r3')))),
+                (('FileA', 'r0'), self.get_parents(())),
+                (('FileA', 'r1'), self.get_parents((('FileA', 'r0'),))),
+                (('FileA', 'r2'), self.get_parents(())),
+                (('FileA', 'r3'), self.get_parents(())),
+                (('FileA', 'm'), self.get_parents((('FileA', 'r0'),
+                    ('FileA', 'r1'), ('FileA', 'r2'), ('FileA', 'r3')))),
                 ]
         for key, parents in parent_details:
             files.add_lines(key, parents, [])
             # immediately after adding it should be queryable.
-            self.assertEqual({key: parents}, files.get_parent_map([key]))
+            self.assertEqual({key:parents}, files.get_parent_map([key]))
         # We can ask for an empty set
         self.assertEqual({}, files.get_parent_map([]))
         # We can ask for many keys
         all_parents = dict(parent_details)
         self.assertEqual(all_parents, files.get_parent_map(all_parents.keys()))
         # Absent keys are just not included in the result.
-        keys = list(all_parents.keys())
+        keys = all_parents.keys()
         if self.key_length == 1:
-            keys.insert(1, (b'missing',))
+            keys.insert(1, ('missing',))
         else:
-            keys.insert(1, (b'missing', b'missing'))
+            keys.insert(1, ('missing', 'missing'))
         # Absent keys are just ignored
         self.assertEqual(all_parents, files.get_parent_map(keys))
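The keys = list(all_parents.keys()) change above is forced by Python 3 returning a view from dict.keys() rather than a list; the subsequent keys.insert(...) only works on a real list. Illustration:

    all_parents = {('r0',): (), ('r1',): (('r0',),)}
    keys = all_parents.keys()        # list on Python 2, a view on Python 3
    try:
        keys.insert(1, ('missing',))
    except AttributeError:
        # Python 3: views are not mutable sequences, so copy first.
        keys = list(all_parents.keys())
        keys.insert(1, ('missing',))
    assert ('missing',) in keys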

@@ -2323 +2270 @@
         files = self.get_versionedfiles()
         self.get_diamond_files(files)
         if self.key_length == 1:
-            keys = [(b'base',), (b'origin',), (b'left',),
-                    (b'merged',), (b'right',)]
+            keys = [('base',), ('origin',), ('left',), ('merged',), ('right',)]
         else:
             # ask for shas from different prefixes.
             keys = [
-                (b'FileA', b'base'), (b'FileB', b'origin'), (b'FileA', b'left'),
-                (b'FileA', b'merged'), (b'FileB', b'right'),
+                ('FileA', 'base'), ('FileB', 'origin'), ('FileA', 'left'),
+                ('FileA', 'merged'), ('FileB', 'right'),
                 ]
         self.assertEqual({
-            keys[0]: b'51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
-            keys[1]: b'00e364d235126be43292ab09cb4686cf703ddc17',
-            keys[2]: b'a8478686da38e370e32e42e8a0c220e33ee9132f',
-            keys[3]: b'ed8bce375198ea62444dc71952b22cfc2b09226d',
-            keys[4]: b'9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
+            keys[0]: '51c64a6f4fc375daf0d24aafbabe4d91b6f4bb44',
+            keys[1]: '00e364d235126be43292ab09cb4686cf703ddc17',
+            keys[2]: 'a8478686da38e370e32e42e8a0c220e33ee9132f',
+            keys[3]: 'ed8bce375198ea62444dc71952b22cfc2b09226d',
+            keys[4]: '9ef09dfa9d86780bdec9219a22560c6ece8e0ef1',
             },
             files.get_sha1s(keys))

@@ -2350 +2296 @@
         self.assertEqual(set(actual.keys()), set(expected.keys()))
         actual_parents = actual.get_parent_map(actual.keys())
         if self.graph:
-            self.assertEqual(
-                actual_parents, expected.get_parent_map(expected.keys()))
+            self.assertEqual(actual_parents, expected.get_parent_map(expected.keys()))
         else:
             for key, parents in actual_parents.items():
                 self.assertEqual(None, parents)
         for key in actual.keys():
-            actual_text = next(actual.get_record_stream(
-                [key], 'unordered', True)).get_bytes_as('fulltext')
-            expected_text = next(expected.get_record_stream(
-                [key], 'unordered', True)).get_bytes_as('fulltext')
+            actual_text = actual.get_record_stream(
+                [key], 'unordered', True).next().get_bytes_as('fulltext')
+            expected_text = expected.get_record_stream(
+                [key], 'unordered', True).next().get_bytes_as('fulltext')
             self.assertEqual(actual_text, expected_text)

     def test_insert_record_stream_fulltexts(self):
@@ -2373 +2318 @@
             source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-                                          False)
+            False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)

@@ -2388 +2333 @@
             source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-                                          False)
+            False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)

@@ -2401 +2346 @@
         source = make_file_factory(True, mapper)(source_transport)
         self.get_diamond_files(source)
         stream = source.get_record_stream(source.keys(), 'topological',
-                                          False)
+            False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)

@@ -2414 +2359 @@
         source = make_file_factory(True, mapper)(source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-                                          False)
+            False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)

@@ -2427 +2372 @@
         source = make_file_factory(False, mapper)(source_transport)
         self.get_diamond_files(source)
         stream = source.get_record_stream(source.keys(), 'topological',
-                                          False)
+            False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)

@@ -2440 +2385 @@
         source = make_file_factory(False, mapper)(source_transport)
         self.get_diamond_files(source, trailing_eol=False)
         stream = source.get_record_stream(source.keys(), 'topological',
-                                          False)
+            False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)

@@ -2452 +2397 @@
         # insert some keys into f.
         self.get_diamond_files(files, left_only=True)
         stream = source.get_record_stream(source.keys(), 'topological',
-                                          False)
+            False)
         files.insert_record_stream(stream)
         self.assertIdenticalVersionedFile(source, files)

@@ -2460 +2405 @@
         """Inserting a stream with absent keys should raise an error."""
         files = self.get_versionedfiles()
         source = self.get_versionedfiles('source')
-        stream = source.get_record_stream([(b'missing',) * self.key_length],
-                                          'topological', False)
+        stream = source.get_record_stream([('missing',) * self.key_length],
+            'topological', False)
         self.assertRaises(errors.RevisionNotPresent, files.insert_record_stream,
-                          stream)
+            stream)

     def test_insert_record_stream_out_of_order(self):
         """An out of order stream can either error or work."""
@@ -2471 +2416 @@
         source = self.get_versionedfiles('source')
         self.get_diamond_files(source)
         if self.key_length == 1:
-            origin_keys = [(b'origin',)]
-            end_keys = [(b'merged',), (b'left',)]
-            start_keys = [(b'right',), (b'base',)]
+            origin_keys = [('origin',)]
+            end_keys = [('merged',), ('left',)]
+            start_keys = [('right',), ('base',)]
         else:
-            origin_keys = [(b'FileA', b'origin'), (b'FileB', b'origin')]
-            end_keys = [(b'FileA', b'merged',), (b'FileA', b'left',),
-                        (b'FileB', b'merged',), (b'FileB', b'left',)]
-            start_keys = [(b'FileA', b'right',), (b'FileA', b'base',),
-                          (b'FileB', b'right',), (b'FileB', b'base',)]
-        origin_entries = source.get_record_stream(
-            origin_keys, 'unordered', False)
+            origin_keys = [('FileA', 'origin'), ('FileB', 'origin')]
+            end_keys = [('FileA', 'merged',), ('FileA', 'left',),
+                ('FileB', 'merged',), ('FileB', 'left',)]
+            start_keys = [('FileA', 'right',), ('FileA', 'base',),
+                ('FileB', 'right',), ('FileB', 'base',)]
+        origin_entries = source.get_record_stream(origin_keys, 'unordered', False)
         end_entries = source.get_record_stream(end_keys, 'topological', False)
-        start_entries = source.get_record_stream(
-            start_keys, 'topological', False)
-        entries = itertools.chain(origin_entries, end_entries, start_entries)
+        start_entries = source.get_record_stream(start_keys, 'topological', False)
+        entries = chain(origin_entries, end_entries, start_entries)
         try:
             files.insert_record_stream(entries)
         except RevisionNotPresent:
@@ -2503 +2446 @@
         source = self.get_versionedfiles('source')
         parents = ()
         keys = []
-        content = [(b'same same %d\n' % n) for n in range(500)]
-        letters = b'abcdefghijklmnopqrstuvwxyz'
-        for i in range(len(letters)):
-            letter = letters[i:i + 1]
-            key = (b'key-' + letter,)
+        content = [('same same %d\n' % n) for n in range(500)]
+        for letter in 'abcdefghijklmnopqrstuvwxyz':
+            key = ('key-' + letter,)
             if self.key_length == 2:
-                key = (b'prefix',) + key
-            content.append(b'content for ' + letter + b'\n')
+                key = ('prefix',) + key
+            content.append('content for ' + letter + '\n')
             source.add_lines(key, parents, content)
             keys.append(key)
             parents = (key,)
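The loop rewrite above (slicing letters[i:i + 1] instead of iterating the string directly) is needed because indexing or iterating a bytes object on Python 3 yields integers; only slicing returns one-byte bytes objects. Sketch:

    letters = b'abc'
    assert letters[0:1] == b'a'   # a slice is a length-1 bytes object
    if isinstance(letters[0], int):
        # Python 3: b'abc'[0] == 97 and iteration yields ints, hence
        # the ported loop walks the string by index and slices.
        assert letters[0] == 97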
@@ -2519 +2460 @@
         streams = []
         for key in reversed(keys):
             streams.append(source.get_record_stream([key], 'unordered', False))
-        deltas = itertools.chain.from_iterable(streams[:-1])
+        deltas = chain(*streams[:-1])
         files = self.get_versionedfiles()
         try:
             files.insert_record_stream(deltas)
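Both spellings of the concatenation above are equivalent here: chain(*streams[:-1]) unpacks the list of streams eagerly into arguments, while itertools.chain.from_iterable(streams[:-1]) also accepts a lazy outer iterable; each inner stream is still consumed lazily either way. Sketch:

    import itertools

    streams = [iter([1]), iter([2]), iter([3])]
    eager = list(itertools.chain(*streams[:-1]))
    streams = [iter([1]), iter([2]), iter([3])]
    lazy = list(itertools.chain.from_iterable(streams[:-1]))
    assert eager == lazy == [1, 2]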
@@ -2542 +2483 @@
         source_transport.mkdir('.')
         source = make_file_factory(False, mapper)(source_transport)
         get_diamond_files(source, self.key_length, trailing_eol=True,
-                          nograph=False, left_only=False)
+            nograph=False, left_only=False)
         return source

     def test_insert_record_stream_delta_missing_basis_no_corruption(self):
@@ -2551 +2492 @@
         not added.
         """
         source = self.get_knit_delta_source()
-        keys = [self.get_simple_key(b'origin'), self.get_simple_key(b'merged')]
+        keys = [self.get_simple_key('origin'), self.get_simple_key('merged')]
         entries = source.get_record_stream(keys, 'unordered', False)
         files = self.get_versionedfiles()
         if self.support_partial_insertion:
             self.assertEqual([],
-                             list(files.get_missing_compression_parent_keys()))
+                list(files.get_missing_compression_parent_keys()))
             files.insert_record_stream(entries)
             missing_bases = files.get_missing_compression_parent_keys()
-            self.assertEqual({self.get_simple_key(b'left')},
-                             set(missing_bases))
+            self.assertEqual(set([self.get_simple_key('left')]),
+                set(missing_bases))
             self.assertEqual(set(keys), set(files.get_parent_map(keys)))
         else:
             self.assertRaises(
@@ -2578 +2519 @@
             raise TestNotApplicable(
                 'versioned file scenario does not support partial insertion')
         source = self.get_knit_delta_source()
-        entries = source.get_record_stream([self.get_simple_key(b'origin'),
-                                            self.get_simple_key(b'merged')], 'unordered', False)
+        entries = source.get_record_stream([self.get_simple_key('origin'),
+            self.get_simple_key('merged')], 'unordered', False)
         files = self.get_versionedfiles()
         files.insert_record_stream(entries)
         missing_bases = files.get_missing_compression_parent_keys()
-        self.assertEqual({self.get_simple_key(b'left')},
-                         set(missing_bases))
+        self.assertEqual(set([self.get_simple_key('left')]),
+            set(missing_bases))
         # 'merged' is inserted (although a commit of a write group involving
         # this versionedfiles would fail).
-        merged_key = self.get_simple_key(b'merged')
+        merged_key = self.get_simple_key('merged')
         self.assertEqual(
-            [merged_key], list(files.get_parent_map([merged_key]).keys()))
+            [merged_key], files.get_parent_map([merged_key]).keys())
         # Add the full delta closure of the missing records
         missing_entries = source.get_record_stream(
             missing_bases, 'unordered', True)
@@ -2597 +2538 @@
         # Now 'merged' is fully inserted (and a commit would succeed).
         self.assertEqual([], list(files.get_missing_compression_parent_keys()))
         self.assertEqual(
-            [merged_key], list(files.get_parent_map([merged_key]).keys()))
+            [merged_key], files.get_parent_map([merged_key]).keys())
         files.check()

     def test_iter_lines_added_or_present_in_keys(self):
@@ -2617 +2558 @@

         files = self.get_versionedfiles()
         # add a base to get included
-        files.add_lines(self.get_simple_key(b'base'), (), [b'base\n'])
+        files.add_lines(self.get_simple_key('base'), (), ['base\n'])
         # add a ancestor to be included on one side
-        files.add_lines(self.get_simple_key(
-            b'lancestor'), (), [b'lancestor\n'])
+        files.add_lines(self.get_simple_key('lancestor'), (), ['lancestor\n'])
         # add a ancestor to be included on the other side
-        files.add_lines(self.get_simple_key(b'rancestor'),
-                        self.get_parents([self.get_simple_key(b'base')]), [b'rancestor\n'])
+        files.add_lines(self.get_simple_key('rancestor'),
+            self.get_parents([self.get_simple_key('base')]), ['rancestor\n'])
         # add a child of rancestor with no eofile-nl
-        files.add_lines(self.get_simple_key(b'child'),
-                        self.get_parents([self.get_simple_key(b'rancestor')]),
-                        [b'base\n', b'child\n'])
+        files.add_lines(self.get_simple_key('child'),
+            self.get_parents([self.get_simple_key('rancestor')]),
+            ['base\n', 'child\n'])
         # add a child of lancestor and base to join the two roots
-        files.add_lines(self.get_simple_key(b'otherchild'),
-                        self.get_parents([self.get_simple_key(b'lancestor'),
-                                          self.get_simple_key(b'base')]),
-                        [b'base\n', b'lancestor\n', b'otherchild\n'])
-
+        files.add_lines(self.get_simple_key('otherchild'),
+            self.get_parents([self.get_simple_key('lancestor'),
+                self.get_simple_key('base')]),
+            ['base\n', 'lancestor\n', 'otherchild\n'])
         def iter_with_keys(keys, expected):
             # now we need to see what lines are returned, and how often.
             lines = {}
             progress = InstrumentedProgress()
             # iterate over the lines
             for line in files.iter_lines_added_or_present_in_keys(keys,
-                                                                  pb=progress):
+                pb=progress):
                 lines.setdefault(line, 0)
                 lines[line] += 1
-            if [] != progress.updates:
+            if []!= progress.updates:
                 self.assertEqual(expected, progress.updates)
             return lines
         lines = iter_with_keys(
-            [self.get_simple_key(b'child'),
-             self.get_simple_key(b'otherchild')],
+            [self.get_simple_key('child'), self.get_simple_key('otherchild')],
             [('Walking content', 0, 2),
              ('Walking content', 1, 2),
              ('Walking content', 2, 2)])
         # we must see child and otherchild
-        self.assertTrue(lines[(b'child\n', self.get_simple_key(b'child'))] > 0)
+        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
         self.assertTrue(
-            lines[(b'otherchild\n', self.get_simple_key(b'otherchild'))] > 0)
+            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)
         # we dont care if we got more than that.

         # test all lines
         lines = iter_with_keys(files.keys(),
-                               [('Walking content', 0, 5),
-                                ('Walking content', 1, 5),
-                                ('Walking content', 2, 5),
-                                ('Walking content', 3, 5),
-                                ('Walking content', 4, 5),
-                                ('Walking content', 5, 5)])
+            [('Walking content', 0, 5),
+             ('Walking content', 1, 5),
+             ('Walking content', 2, 5),
+             ('Walking content', 3, 5),
+             ('Walking content', 4, 5),
+             ('Walking content', 5, 5)])
         # all lines must be seen at least once
-        self.assertTrue(lines[(b'base\n', self.get_simple_key(b'base'))] > 0)
-        self.assertTrue(
-            lines[(b'lancestor\n', self.get_simple_key(b'lancestor'))] > 0)
-        self.assertTrue(
-            lines[(b'rancestor\n', self.get_simple_key(b'rancestor'))] > 0)
-        self.assertTrue(lines[(b'child\n', self.get_simple_key(b'child'))] > 0)
-        self.assertTrue(
-            lines[(b'otherchild\n', self.get_simple_key(b'otherchild'))] > 0)
+        self.assertTrue(lines[('base\n', self.get_simple_key('base'))] > 0)
+        self.assertTrue(
+            lines[('lancestor\n', self.get_simple_key('lancestor'))] > 0)
+        self.assertTrue(
+            lines[('rancestor\n', self.get_simple_key('rancestor'))] > 0)
+        self.assertTrue(lines[('child\n', self.get_simple_key('child'))] > 0)
+        self.assertTrue(
+            lines[('otherchild\n', self.get_simple_key('otherchild'))] > 0)

     def test_make_mpdiffs(self):
-        from breezy import multiparent
+        from bzrlib import multiparent
         files = self.get_versionedfiles('source')
         # add texts that should trip the knit maximum delta chain threshold
         # as well as doing parallel chains of data in knits.
         # this is done by two chains of 25 insertions
-        files.add_lines(self.get_simple_key(b'base'), [], [b'line\n'])
-        files.add_lines(self.get_simple_key(b'noeol'),
-                        self.get_parents([self.get_simple_key(b'base')]), [b'line'])
+        files.add_lines(self.get_simple_key('base'), [], ['line\n'])
+        files.add_lines(self.get_simple_key('noeol'),
+            self.get_parents([self.get_simple_key('base')]), ['line'])
         # detailed eol tests:
         # shared last line with parent no-eol
-        files.add_lines(self.get_simple_key(b'noeolsecond'),
-                        self.get_parents([self.get_simple_key(b'noeol')]),
-                        [b'line\n', b'line'])
+        files.add_lines(self.get_simple_key('noeolsecond'),
+            self.get_parents([self.get_simple_key('noeol')]),
+                ['line\n', 'line'])
         # differing last line with parent, both no-eol
-        files.add_lines(self.get_simple_key(b'noeolnotshared'),
-                        self.get_parents(
-                            [self.get_simple_key(b'noeolsecond')]),
-                        [b'line\n', b'phone'])
+        files.add_lines(self.get_simple_key('noeolnotshared'),
+            self.get_parents([self.get_simple_key('noeolsecond')]),
+                ['line\n', 'phone'])
         # add eol following a noneol parent, change content
-        files.add_lines(self.get_simple_key(b'eol'),
-                        self.get_parents([self.get_simple_key(b'noeol')]), [b'phone\n'])
+        files.add_lines(self.get_simple_key('eol'),
+            self.get_parents([self.get_simple_key('noeol')]), ['phone\n'])
         # add eol following a noneol parent, no change content
-        files.add_lines(self.get_simple_key(b'eolline'),
-                        self.get_parents([self.get_simple_key(b'noeol')]), [b'line\n'])
+        files.add_lines(self.get_simple_key('eolline'),
+            self.get_parents([self.get_simple_key('noeol')]), ['line\n'])
         # noeol with no parents:
-        files.add_lines(self.get_simple_key(b'noeolbase'), [], [b'line'])
+        files.add_lines(self.get_simple_key('noeolbase'), [], ['line'])
         # noeol preceeding its leftmost parent in the output:
         # this is done by making it a merge of two parents with no common
         # anestry: noeolbase and noeol with the
         # later-inserted parent the leftmost.
-        files.add_lines(self.get_simple_key(b'eolbeforefirstparent'),
-                        self.get_parents([self.get_simple_key(b'noeolbase'),
-                                          self.get_simple_key(b'noeol')]),
-                        [b'line'])
+        files.add_lines(self.get_simple_key('eolbeforefirstparent'),
+            self.get_parents([self.get_simple_key('noeolbase'),
+                self.get_simple_key('noeol')]),
+            ['line'])
         # two identical eol texts
-        files.add_lines(self.get_simple_key(b'noeoldup'),
-                        self.get_parents([self.get_simple_key(b'noeol')]), [b'line'])
-        next_parent = self.get_simple_key(b'base')
-        text_name = b'chain1-'
-        text = [b'line\n']
-        sha1s = {0: b'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
-                 1: b'45e21ea146a81ea44a821737acdb4f9791c8abe7',
-                 2: b'e1f11570edf3e2a070052366c582837a4fe4e9fa',
-                 3: b'26b4b8626da827088c514b8f9bbe4ebf181edda1',
-                 4: b'e28a5510be25ba84d31121cff00956f9970ae6f6',
-                 5: b'd63ec0ce22e11dcf65a931b69255d3ac747a318d',
-                 6: b'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
-                 7: b'95c14da9cafbf828e3e74a6f016d87926ba234ab',
-                 8: b'779e9a0b28f9f832528d4b21e17e168c67697272',
-                 9: b'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
-                 10: b'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
-                 11: b'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
-                 12: b'31a2286267f24d8bedaa43355f8ad7129509ea85',
-                 13: b'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
-                 14: b'2c4b1736566b8ca6051e668de68650686a3922f2',
-                 15: b'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
-                 16: b'b0d2e18d3559a00580f6b49804c23fea500feab3',
-                 17: b'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
-                 18: b'5cf64a3459ae28efa60239e44b20312d25b253f3',
-                 19: b'1ebed371807ba5935958ad0884595126e8c4e823',
-                 20: b'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
-                 21: b'01edc447978004f6e4e962b417a4ae1955b6fe5d',
-                 22: b'd8d8dc49c4bf0bab401e0298bb5ad827768618bb',
-                 23: b'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
-                 24: b'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
-                 25: b'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
+        files.add_lines(self.get_simple_key('noeoldup'),
+            self.get_parents([self.get_simple_key('noeol')]), ['line'])
+        next_parent = self.get_simple_key('base')
+        text_name = 'chain1-'
+        text = ['line\n']
+        sha1s = {0 :'da6d3141cb4a5e6f464bf6e0518042ddc7bfd079',
+                 1 :'45e21ea146a81ea44a821737acdb4f9791c8abe7',
+                 2 :'e1f11570edf3e2a070052366c582837a4fe4e9fa',
+                 3 :'26b4b8626da827088c514b8f9bbe4ebf181edda1',
+                 4 :'e28a5510be25ba84d31121cff00956f9970ae6f6',
+                 5 :'d63ec0ce22e11dcf65a931b69255d3ac747a318d',
+                 6 :'2c2888d288cb5e1d98009d822fedfe6019c6a4ea',
+                 7 :'95c14da9cafbf828e3e74a6f016d87926ba234ab',
+                 8 :'779e9a0b28f9f832528d4b21e17e168c67697272',
+                 9 :'1f8ff4e5c6ff78ac106fcfe6b1e8cb8740ff9a8f',
+                 10:'131a2ae712cf51ed62f143e3fbac3d4206c25a05',
+                 11:'c5a9d6f520d2515e1ec401a8f8a67e6c3c89f199',
+                 12:'31a2286267f24d8bedaa43355f8ad7129509ea85',
+                 13:'dc2a7fe80e8ec5cae920973973a8ee28b2da5e0a',
+                 14:'2c4b1736566b8ca6051e668de68650686a3922f2',
+                 15:'5912e4ecd9b0c07be4d013e7e2bdcf9323276cde',
+                 16:'b0d2e18d3559a00580f6b49804c23fea500feab3',
+                 17:'8e1d43ad72f7562d7cb8f57ee584e20eb1a69fc7',
+                 18:'5cf64a3459ae28efa60239e44b20312d25b253f3',
+                 19:'1ebed371807ba5935958ad0884595126e8c4e823',
+                 20:'2aa62a8b06fb3b3b892a3292a068ade69d5ee0d3',
+                 21:'01edc447978004f6e4e962b417a4ae1955b6fe5d',
+                 22:'d8d8dc49c4bf0bab401e0298bb5ad827768618bb',
+                 23:'c21f62b1c482862983a8ffb2b0c64b3451876e3f',
+                 24:'c0593fe795e00dff6b3c0fe857a074364d5f04fc',
+                 25:'dd1a1cf2ba9cc225c3aff729953e6364bf1d1855',
                  }
         for depth in range(26):
-            new_version = self.get_simple_key(text_name + b'%d' % depth)
-            text = text + [b'line\n']
+            new_version = self.get_simple_key(text_name + '%s' % depth)
+            text = text + ['line\n']
             files.add_lines(new_version, self.get_parents([next_parent]), text)
             next_parent = new_version
-        next_parent = self.get_simple_key(b'base')
-        text_name = b'chain2-'
-        text = [b'line\n']
+        next_parent = self.get_simple_key('base')
+        text_name = 'chain2-'
+        text = ['line\n']
         for depth in range(26):
-            new_version = self.get_simple_key(text_name + b'%d' % depth)
-            text = text + [b'line\n']
+            new_version = self.get_simple_key(text_name + '%s' % depth)
+            text = text + ['line\n']
             files.add_lines(new_version, self.get_parents([next_parent]), text)
             next_parent = new_version
         target = self.get_versionedfiles('target')
2764
2701
            target.add_mpdiffs(
2765
2702
                [(key, parents, files.get_sha1s([key])[key], mpdiff)])
2766
2703
            self.assertEqualDiff(
2767
 
                next(files.get_record_stream([key], 'unordered',
2768
 
                                             True)).get_bytes_as('fulltext'),
2769
 
                next(target.get_record_stream([key], 'unordered',
2770
 
                                              True)).get_bytes_as('fulltext')
 
2704
                files.get_record_stream([key], 'unordered',
 
2705
                    True).next().get_bytes_as('fulltext'),
 
2706
                target.get_record_stream([key], 'unordered',
 
2707
                    True).next().get_bytes_as('fulltext')
2771
2708
                )
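Two Python 3 idioms recur throughout these hunks. A minimal, self-contained sketch of both (illustrative only, not part of the test file):

    # Bytes interpolation: b'%d' % n is accepted from Python 3.5 on
    # (PEP 461), replacing the old '%s' formatting of key names.
    text_name = b'chain1-'
    assert text_name + b'%d' % 3 == b'chain1-3'

    # Iterator advance: the .next() method is gone in Python 3; the
    # next() builtin works on Python 2.6+ and 3 alike.
    records = iter(['fulltext'])
    assert next(records) == 'fulltext'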
2772
2709
 
2773
2710
    def test_keys(self):
2776
2713
        files = self.get_versionedfiles()
2777
2714
        self.assertEqual(set(), set(files.keys()))
2778
2715
        if self.key_length == 1:
2779
 
            key = (b'foo',)
 
2716
            key = ('foo',)
2780
2717
        else:
2781
 
            key = (b'foo', b'bar',)
 
2718
            key = ('foo', 'bar',)
2782
2719
        files.add_lines(key, (), [])
2783
 
        self.assertEqual({key}, set(files.keys()))
 
2720
        self.assertEqual(set([key]), set(files.keys()))
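The set-literal spelling adopted above is equivalent to the old constructor call; a one-line check (illustrative):

    key = (b'foo',)
    assert {key} == set([key])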
2784
2721
 
2785
2722
 
2786
2723
class VirtualVersionedFilesTests(TestCase):
2794
2731
        return ret
2795
2732
 
2796
2733
    def setUp(self):
2797
 
        super(VirtualVersionedFilesTests, self).setUp()
 
2734
        TestCase.setUp(self)
2798
2735
        self._lines = {}
2799
2736
        self._parent_map = {}
2800
2737
        self.texts = VirtualVersionedFiles(self._get_parent_map,
2802
2739
 
2803
2740
    def test_add_lines(self):
2804
2741
        self.assertRaises(NotImplementedError,
2805
 
                          self.texts.add_lines, b"foo", [], [])
 
2742
                self.texts.add_lines, "foo", [], [])
2806
2743
 
2807
2744
    def test_add_mpdiffs(self):
2808
2745
        self.assertRaises(NotImplementedError,
2809
 
                          self.texts.add_mpdiffs, [])
 
2746
                self.texts.add_mpdiffs, [])
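The setUp change above swaps a direct base-class call for cooperative super(), so any mixins in the MRO are initialised too. Side by side:

    # old: TestCase.setUp(self)
    # new: super(VirtualVersionedFilesTests, self).setUp()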
2810
2747
 
2811
2748
    def test_check_noerrors(self):
2812
2749
        self.texts.check()
2816
2753
                          [])
2817
2754
 
2818
2755
    def test_get_sha1s_nonexistent(self):
2819
 
        self.assertEqual({}, self.texts.get_sha1s([(b"NONEXISTENT",)]))
 
2756
        self.assertEquals({}, self.texts.get_sha1s([("NONEXISTENT",)]))
2820
2757
 
2821
2758
    def test_get_sha1s(self):
2822
 
        self._lines[b"key"] = [b"dataline1", b"dataline2"]
2823
 
        self.assertEqual({(b"key",): osutils.sha_strings(self._lines[b"key"])},
2824
 
                         self.texts.get_sha1s([(b"key",)]))
 
2759
        self._lines["key"] = ["dataline1", "dataline2"]
 
2760
        self.assertEquals({("key",): osutils.sha_strings(self._lines["key"])},
 
2761
                           self.texts.get_sha1s([("key",)]))
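Besides replacing the deprecated assertEquals alias with assertEqual, the hunk moves every key and line to bytes. The expected digest can be sketched with the stdlib alone (a rough equivalent of osutils.sha_strings, which feeds each string to a SHA-1 hash in turn):

    import hashlib

    def sha_strings(strings):
        # Hash the strings as one concatenated byte stream.
        s = hashlib.sha1()
        for string in strings:
            s.update(string)
        return s.hexdigest()

    assert sha_strings([b"dataline1", b"dataline2"]) == \
        hashlib.sha1(b"dataline1" + b"dataline2").hexdigest()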
2825
2762
 
2826
2763
    def test_get_parent_map(self):
2827
 
        self._parent_map = {b"G": (b"A", b"B")}
2828
 
        self.assertEqual({(b"G",): ((b"A",), (b"B",))},
2829
 
                         self.texts.get_parent_map([(b"G",), (b"L",)]))
 
2764
        self._parent_map = {"G": ("A", "B")}
 
2765
        self.assertEquals({("G",): (("A",),("B",))},
 
2766
                          self.texts.get_parent_map([("G",), ("L",)]))
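The shape the assertion expects can be sketched as a pure function: VirtualVersionedFiles keys are 1-tuples, so a plain {revision: parents} mapping comes back with every id wrapped (a sketch of the observable mapping, not the implementation):

    def wrap_parent_map(parent_map):
        # Wrap each revision id, and each of its parents, in a 1-tuple.
        return {(rev,): tuple((parent,) for parent in parents)
                for rev, parents in parent_map.items()}

    assert wrap_parent_map({b"G": (b"A", b"B")}) == \
        {(b"G",): ((b"A",), (b"B",))}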
2830
2767
 
2831
2768
    def test_get_record_stream(self):
2832
 
        self._lines[b"A"] = [b"FOO", b"BAR"]
2833
 
        it = self.texts.get_record_stream([(b"A",)], "unordered", True)
2834
 
        record = next(it)
2835
 
        self.assertEqual("chunked", record.storage_kind)
2836
 
        self.assertEqual(b"FOOBAR", record.get_bytes_as("fulltext"))
2837
 
        self.assertEqual([b"FOO", b"BAR"], record.get_bytes_as("chunked"))
 
2769
        self._lines["A"] = ["FOO", "BAR"]
 
2770
        it = self.texts.get_record_stream([("A",)], "unordered", True)
 
2771
        record = it.next()
 
2772
        self.assertEquals("chunked", record.storage_kind)
 
2773
        self.assertEquals("FOOBAR", record.get_bytes_as("fulltext"))
 
2774
        self.assertEquals(["FOO", "BAR"], record.get_bytes_as("chunked"))
2838
2775
 
2839
2776
    def test_get_record_stream_absent(self):
2840
 
        it = self.texts.get_record_stream([(b"A",)], "unordered", True)
2841
 
        record = next(it)
2842
 
        self.assertEqual("absent", record.storage_kind)
 
2777
        it = self.texts.get_record_stream([("A",)], "unordered", True)
 
2778
        record = it.next()
 
2779
        self.assertEquals("absent", record.storage_kind)
2843
2780
 
2844
2781
    def test_iter_lines_added_or_present_in_keys(self):
2845
 
        self._lines[b"A"] = [b"FOO", b"BAR"]
2846
 
        self._lines[b"B"] = [b"HEY"]
2847
 
        self._lines[b"C"] = [b"Alberta"]
2848
 
        it = self.texts.iter_lines_added_or_present_in_keys([(b"A",), (b"B",)])
2849
 
        self.assertEqual(sorted([(b"FOO", b"A"), (b"BAR", b"A"), (b"HEY", b"B")]),
2850
 
                         sorted(list(it)))
 
2782
        self._lines["A"] = ["FOO", "BAR"]
 
2783
        self._lines["B"] = ["HEY"]
 
2784
        self._lines["C"] = ["Alberta"]
 
2785
        it = self.texts.iter_lines_added_or_present_in_keys([("A",), ("B",)])
 
2786
        self.assertEquals(sorted([("FOO", "A"), ("BAR", "A"), ("HEY", "B")]),
 
2787
            sorted(list(it)))
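The expected pairs can be reproduced with a small hypothetical helper (matching the assertion above, not the real implementation): every line of each requested key, tagged with that key's id.

    def iter_lines(lines_by_id, keys):
        for (version,) in keys:
            for line in lines_by_id[version]:
                yield (line, version)

    lines = {b"A": [b"FOO", b"BAR"], b"B": [b"HEY"]}
    assert sorted(iter_lines(lines, [(b"A",), (b"B",)])) == \
        sorted([(b"FOO", b"A"), (b"BAR", b"A"), (b"HEY", b"B")])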
2851
2788
 
2852
2789
 
2853
2790
class TestOrderingVersionedFilesDecorator(TestCaseWithMemoryTransport):
2855
2792
    def get_ordering_vf(self, key_priority):
2856
2793
        builder = self.make_branch_builder('test')
2857
2794
        builder.start_series()
2858
 
        builder.build_snapshot(None, [
2859
 
            ('add', ('', b'TREE_ROOT', 'directory', None))],
2860
 
            revision_id=b'A')
2861
 
        builder.build_snapshot([b'A'], [], revision_id=b'B')
2862
 
        builder.build_snapshot([b'B'], [], revision_id=b'C')
2863
 
        builder.build_snapshot([b'C'], [], revision_id=b'D')
 
2795
        builder.build_snapshot('A', None, [
 
2796
            ('add', ('', 'TREE_ROOT', 'directory', None))])
 
2797
        builder.build_snapshot('B', ['A'], [])
 
2798
        builder.build_snapshot('C', ['B'], [])
 
2799
        builder.build_snapshot('D', ['C'], [])
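The BranchBuilder API change in this hunk, side by side (sketch):

    # old: builder.build_snapshot('B', ['A'], [])
    # new: builder.build_snapshot(['A'], [], revision_id=b'B')

Parents now come first, the actions list second, and the revision id moves to a keyword argument taking a bytes string.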
2864
2800
        builder.finish_series()
2865
2801
        b = builder.get_branch()
2866
2802
        b.lock_read()
2873
2809
        self.assertEqual([], vf.calls)
2874
2810
 
2875
2811
    def test_get_record_stream_topological(self):
2876
 
        vf = self.get_ordering_vf(
2877
 
            {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1})
2878
 
        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
 
2812
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
 
2813
        request_keys = [('B',), ('C',), ('D',), ('A',)]
2879
2814
        keys = [r.key for r in vf.get_record_stream(request_keys,
2880
 
                                                    'topological', False)]
 
2815
                                    'topological', False)]
2881
2816
        # We should have gotten the keys in topological order
2882
 
        self.assertEqual([(b'A',), (b'B',), (b'C',), (b'D',)], keys)
 
2817
        self.assertEqual([('A',), ('B',), ('C',), ('D',)], keys)
2883
2818
        # And recorded that the request was made
2884
2819
        self.assertEqual([('get_record_stream', request_keys, 'topological',
2885
2820
                           False)], vf.calls)
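Why [A, B, C, D]: get_ordering_vf built a linear ancestry A <- B <- C <- D, and a topological ordering must emit every parent before its child. A sketch with that graph (illustrative, not the decorator's code):

    parents = {b'A': [], b'B': [b'A'], b'C': [b'B'], b'D': [b'C']}

    def topo_order(parents):
        # Depth-first visit: append a revision only after its parents.
        order, seen = [], set()
        def visit(rev):
            if rev in seen:
                return
            seen.add(rev)
            for parent in parents[rev]:
                visit(parent)
            order.append(rev)
        for rev in sorted(parents):
            visit(rev)
        return order

    assert topo_order(parents) == [b'A', b'B', b'C', b'D']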
2886
2821
 
2887
2822
    def test_get_record_stream_ordered(self):
2888
 
        vf = self.get_ordering_vf(
2889
 
            {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1})
2890
 
        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
 
2823
        vf = self.get_ordering_vf({('A',): 3, ('B',): 2, ('C',): 4, ('D',): 1})
 
2824
        request_keys = [('B',), ('C',), ('D',), ('A',)]
2891
2825
        keys = [r.key for r in vf.get_record_stream(request_keys,
2892
 
                                                    'unordered', False)]
 
2826
                                   'unordered', False)]
2893
2827
        # They should be returned based on their priority
2894
 
        self.assertEqual([(b'D',), (b'B',), (b'A',), (b'C',)], keys)
 
2828
        self.assertEqual([('D',), ('B',), ('A',), ('C',)], keys)
2895
2829
        # And the request recorded
2896
2830
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
2897
2831
                           False)], vf.calls)
2898
2832
 
2899
2833
    def test_get_record_stream_implicit_order(self):
2900
 
        vf = self.get_ordering_vf({(b'B',): 2, (b'D',): 1})
2901
 
        request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
 
2834
        vf = self.get_ordering_vf({('B',): 2, ('D',): 1})
 
2835
        request_keys = [('B',), ('C',), ('D',), ('A',)]
2902
2836
        keys = [r.key for r in vf.get_record_stream(request_keys,
2903
 
                                                    'unordered', False)]
 
2837
                                   'unordered', False)]
2904
2838
        # A and C are not in the map, so they get sorted to the front. A comes
2905
2839
        # before C alphabetically, so it comes back first
2906
 
        self.assertEqual([(b'A',), (b'C',), (b'D',), (b'B',)], keys)
 
2840
        self.assertEqual([('A',), ('C',), ('D',), ('B',)], keys)
2907
2841
        # And the request recorded
2908
2842
        self.assertEqual([('get_record_stream', request_keys, 'unordered',
2909
2843
                           False)], vf.calls)
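Both 'unordered' expectations in these tests follow from sorting on (priority, key), with keys missing from the map defaulting ahead of everything else (a sketch of the observable behaviour, not the decorator itself):

    def priority_order(keys, key_priority):
        # Lowest priority first; unknown keys sort to the front,
        # alphabetically among themselves.
        return sorted(keys, key=lambda k: (key_priority.get(k, 0), k))

    request_keys = [(b'B',), (b'C',), (b'D',), (b'A',)]
    assert priority_order(request_keys,
                          {(b'A',): 3, (b'B',): 2, (b'C',): 4, (b'D',): 1}) == \
        [(b'D',), (b'B',), (b'A',), (b'C',)]
    assert priority_order(request_keys, {(b'B',): 2, (b'D',): 1}) == \
        [(b'A',), (b'C',), (b'D',), (b'B',)]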