# Branch: http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar
# Revision 3582.3.1 by Martin Pool:
#   Split pack repository tests into their own file and use scenarios
# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

"""Tests for pack repositories.

These tests are repeated for all pack-based repository formats.
"""
from StringIO import StringIO
from stat import S_ISDIR

from bzrlib import (
    bzrdir,
    errors,
    inventory,
    progress,
    repository,
    revision as _mod_revision,
    symbol_versioning,
    tests,
    ui,
    upgrade,
    workingtree,
    )
from bzrlib.index import GraphIndex, InMemoryGraphIndex
from bzrlib.tests import (
    TestCase,
    TestCaseWithTransport,
    TestSkipped,
    )
from bzrlib.transport import (
    fakenfs,
    get_transport,
    )


class TestPackRepository(TestCaseWithTransport):
    """Tests to be repeated across all pack-based formats.

    The following are populated from the test scenario:

    :ivar format_name: Registered name of the format to test.
    :ivar format_string: On-disk format marker.
    :ivar format_supports_external_lookups: Boolean.
    """

    def get_format(self):
        """Return the BzrDirFormat object for the scenario's format name."""
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_attribute__fetch_order(self):
        """Packs do not need ordered data retrieval."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual('unsorted', repo._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Packs reuse deltas."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual(True, repo._fetch_uses_deltas)

    def test_disk_layout(self):
        """A freshly created pack repository has the expected disk layout."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.check_databases(t)

    def check_format(self, t):
        """Assert the on-disk 'format' file matches the scenario's marker."""
        self.assertEqualDiff(
            self.format_string, # from scenario
            t.get('format').read())

    def assertHasNoKndx(self, t, knit_name):
        """Assert that knit_name has no index on t."""
        self.assertFalse(t.has(knit_name + '.kndx'))

    def assertHasNoKnit(self, t, knit_name):
        """Assert that knit_name exists on t."""
        # no default content
        self.assertFalse(t.has(knit_name + '.knit'))

    def check_databases(self, t):
        """check knit content for a repository."""
        # check conversion worked
        self.assertHasNoKndx(t, 'inventory')
        self.assertHasNoKnit(t, 'inventory')
        self.assertHasNoKndx(t, 'revisions')
        self.assertHasNoKnit(t, 'revisions')
        self.assertHasNoKndx(t, 'signatures')
        self.assertHasNoKnit(t, 'signatures')
        self.assertFalse(t.has('knits'))
        # revision-indexes file-container directory
        self.assertEqual([],
            list(GraphIndex(t, 'pack-names', None).iter_all_entries()))
        self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
        self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
        self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
        self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))

    def test_shared_disk_layout(self):
        """A shared pack repository grows a 'shared-storage' marker file."""
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        # we want:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.check_databases(t)

    def test_shared_no_tree_disk_layout(self):
        """Toggling working trees adds/removes the 'no-working-trees' marker."""
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        repo.set_make_working_trees(False)
        # we want:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        # We should have a marker for the no-working-trees flag.
        self.assertEqualDiff('', t.get('no-working-trees').read())
        # The marker should go when we toggle the setting.
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.check_databases(t)

    def test_adding_revision_creates_pack_indices(self):
        """Committing creates a pack entry listing the four index sizes."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
        tree.commit('foobarbaz')
        index = GraphIndex(trans, 'pack-names', None)
        index_nodes = list(index.iter_all_entries())
        self.assertEqual(1, len(index_nodes))
        node = index_nodes[0]
        name = node[1][0]
        # the pack sizes should be listed in the index
        pack_value = node[2]
        sizes = [int(digits) for digits in pack_value.split(' ')]
        for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
            stat = trans.stat('indices/%s%s' % (name, suffix))
            self.assertEqual(size, stat.st_size)

    def test_pulling_nothing_leads_to_no_new_names(self):
        """Fetching nothing must not create any pack names."""
        format = self.get_format()
        tree1 = self.make_branch_and_tree('1', format=format)
        tree2 = self.make_branch_and_tree('2', format=format)
        tree1.branch.repository.fetch(tree2.branch.repository)
        trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))

    def test_commit_across_pack_shape_boundary_autopacks(self):
        """The tenth commit autopacks nine packs into one, clearing obsoletes."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # insert some files in obsolete_packs which should be removed by pack.
        trans.put_bytes('obsolete_packs/foo', '123')
        trans.put_bytes('obsolete_packs/bar', '321')
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10x5) files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = fakenfs.FakeNFSServer()
        server.setUp()
        self.addCleanup(server.tearDown)
        transport = get_transport(server.get_url())
        bzrdir = self.get_format().initialize_on_transport(transport)
        repo = bzrdir.create_repository()
        repo_transport = bzrdir.get_repository_transport(None)
        self.assertTrue(repo_transport.has('obsolete_packs'))
        # these files are in use by another client and typically can't be deleted
        repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
        repo._pack_collection._clear_obsolete_packs()
        self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))

    def test_pack_after_two_commits_packs_everything(self):
        """An explicit pack() collapses all packs into a single pack."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start')
        tree.commit('more work')
        tree.branch.repository.pack()
        # there should be 1 pack:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_layout(self):
        """Packed revisions are laid out newest-first in the pack."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start', rev_id='1')
        tree.commit('more work', rev_id='2')
        tree.branch.repository.pack()
        tree.lock_read()
        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            if key == ('1',):
                pos_1 = int(val[1:].split()[0])
            else:
                pos_2 = int(val[1:].split()[0])
        self.assertTrue(pos_2 < pos_1)

    def test_pack_repositories_support_multiple_write_locks(self):
        """Two clients may hold write locks on a pack repo concurrently."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        self.addCleanup(r1.unlock)
        r2.lock_write()
        r2.unlock()

    def _add_text(self, repo, fileid):
        """Add a text to the repository within a write group."""
        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])

    def test_concurrent_writers_merge_new_packs(self):
        """Two concurrent write groups merge their packs into pack-names."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1.start_write_group()
                try:
                    r2.start_write_group()
                    try:
                        self._add_text(r1, 'fileidr1')
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1.abort_write_group()
                    raise
                # both r1 and r2 have open write groups with data in them
                # created while the other's write group was open.
                # Commit both which requires a merge to the pack-names.
                try:
                    r1.commit_write_group()
                except:
                    r1.abort_write_group()
                    r2.abort_write_group()
                    raise
                r2.commit_write_group()
                # tell r1 to reload from disk
                r1._pack_collection.reset()
                # Now both repositories should know about both names
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(2, len(r1._pack_collection.names()))
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_concurrent_writer_second_preserves_dropping_a_pack(self):
        """A pack dropped by one writer stays dropped after another commits."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        # add a pack to drop
        r1.lock_write()
        try:
            r1.start_write_group()
            try:
                self._add_text(r1, 'fileidr1')
            except:
                r1.abort_write_group()
                raise
            else:
                r1.commit_write_group()
            r1._pack_collection.ensure_loaded()
            name_to_drop = r1._pack_collection.all_packs()[0].name
        finally:
            r1.unlock()
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1._pack_collection.ensure_loaded()
                try:
                    r2.start_write_group()
                    try:
                        # in r1, drop the pack
                        r1._pack_collection._remove_pack_from_memory(
                            r1._pack_collection.get_pack_by_name(name_to_drop))
                        # in r2, add a pack
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1._pack_collection.reset()
                    raise
                # r1 has a changed names list, and r2 an open write groups with
                # changes.
                # save r1, and then commit the r2 write group, which requires a
                # merge to the pack-names, which should not reinstate
                # name_to_drop
                try:
                    r1._pack_collection._save_pack_names()
                    r1._pack_collection.reset()
                except:
                    r2.abort_write_group()
                    raise
                try:
                    r2.commit_write_group()
                except:
                    r2.abort_write_group()
                    raise
                # Now both repositories should know about just one name.
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(1, len(r1._pack_collection.names()))
                self.assertFalse(name_to_drop in r1._pack_collection.names())
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_lock_write_does_not_physically_lock(self):
        """lock_write on a pack repo takes no physical (on-disk) lock."""
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.addCleanup(repo.unlock)
        self.assertFalse(repo.get_physical_lock_status())

    def prepare_for_break_lock(self):
        """Install a UI factory whose stdin answers 'y' to break-lock prompts."""
        # Setup the global ui factory state so that a break-lock method call
        # will find usable input in the input stream.
        old_factory = ui.ui_factory
        def restoreFactory():
            ui.ui_factory = old_factory
        self.addCleanup(restoreFactory)
        ui.ui_factory = ui.SilentUIFactory()
        ui.ui_factory.stdin = StringIO("y\n")

    def test_break_lock_breaks_physical_lock(self):
        """break_lock from another instance releases the pack-names lock."""
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        repo2 = repository.Repository.open('.')
        self.assertTrue(repo.get_physical_lock_status())
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertFalse(repo.get_physical_lock_status())

    def test_broken_physical_locks_error_on__unlock_names_lock(self):
        """Unlocking a lock broken elsewhere raises LockBroken."""
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        self.assertTrue(repo.get_physical_lock_status())
        repo2 = repository.Repository.open('.')
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)

    def test_fetch_without_find_ghosts_ignores_ghosts(self):
        """Fetching a revision does not also fetch ghosts it references."""
        # we want two repositories at this point:
        # one with a revision that is a ghost in the other
        # repository.
        # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
        # 'references' is present in both repositories, and 'tip' is present
        # just in has_ghost.
        # has_ghost       missing_ghost
        #------------------------------
        # 'ghost'             -
        # 'references'    'references'
        # 'tip'               -
        # In this test we fetch 'tip' which should not fetch 'ghost'
        has_ghost = self.make_repository('has_ghost', format=self.get_format())
        missing_ghost = self.make_repository('missing_ghost',
            format=self.get_format())

        def add_commit(repo, revision_id, parent_ids):
            # Build a minimal revision (inventory + root text) by hand so we
            # can give it arbitrary parent ids, including ghosts.
            repo.lock_write()
            repo.start_write_group()
            inv = inventory.Inventory(revision_id=revision_id)
            inv.root.revision = revision_id
            root_id = inv.root.file_id
            sha1 = repo.add_inventory(revision_id, inv, [])
            repo.texts.add_lines((root_id, revision_id), [], [])
            rev = _mod_revision.Revision(timestamp=0,
                                         timezone=None,
                                         committer="Foo Bar <foo@example.com>",
                                         message="Message",
                                         inventory_sha1=sha1,
                                         revision_id=revision_id)
            rev.parent_ids = parent_ids
            repo.add_revision(revision_id, rev)
            repo.commit_write_group()
            repo.unlock()
        add_commit(has_ghost, 'ghost', [])
        add_commit(has_ghost, 'references', ['ghost'])
        add_commit(missing_ghost, 'references', ['ghost'])
        add_commit(has_ghost, 'tip', ['references'])
        missing_ghost.fetch(has_ghost, 'tip')
        # missing ghost now has tip and not ghost.
        rev = missing_ghost.get_revision('tip')
        inv = missing_ghost.get_inventory('tip')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_revision, 'ghost')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_inventory, 'ghost')

    def test_supports_external_lookups(self):
        """The format's stacking support flag matches the scenario."""
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
            repo._format.supports_external_lookups)

# Revision 3582.3.2 by Martin Pool: Add 1.6 formats to pack repository tests.
## class TestExternalDevelopment1(TestCaseWithTransport):
##
##
##
##     # mixin class for testing stack-supporting development formats
##
##     def test_stack_checks_compatibility(self):
##         # early versions of the packing code relied on pack internals to
##         # stack, but the current version should be able to stack on any
##         # format.
##         #
##         # TODO: Possibly this should be run per-repository-format and raise
##         # TestNotApplicable on formats that don't support stacking. -- mbp
##         # 20080729
##         repo = self.make_repository('repo', format=self.get_format())
##         if repo.supports_rich_root():
##             # can only stack on repositories that have compatible internal
##             # metadata
##             matching_format_name = 'pack-0.92-subtree'
##             mismatching_format_name = 'pack-0.92'
##         else:
##             matching_format_name = 'pack-0.92'
##             mismatching_format_name = 'pack-0.92-subtree'
##         base = self.make_repository('base', format=matching_format_name)
##         repo.add_fallback_repository(base)
##         # you can't stack on something with incompatible data
##         bad_repo = self.make_repository('mismatch',
##             format=mismatching_format_name)
##         e = self.assertRaises(errors.IncompatibleRepositories,
##             repo.add_fallback_repository, bad_repo)
##         self.assertContainsRe(str(e),
##             r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
##             r'KnitPackRepository.*/repo/.*\n'
##             r'different rich-root support')
##
##     def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
##         base = self.make_branch_and_tree('base', format=self.get_format())
##         base.commit('foo')
##         referencing = self.make_branch_and_tree('repo', format=self.get_format())
##         referencing.branch.repository.add_fallback_repository(base.branch.repository)
##         referencing.commit('bar')
##         new_instance = referencing.bzrdir.open_repository()
##         new_instance.lock_read()
##         self.addCleanup(new_instance.unlock)
##         new_instance._pack_collection.ensure_loaded()
##         self.assertEqual(1, len(new_instance._pack_collection.all_packs()))
##
##     def test_autopack_only_considers_main_repo_packs(self):
##         base = self.make_branch_and_tree('base', format=self.get_format())
##         base.commit('foo')
##         tree = self.make_branch_and_tree('repo', format=self.get_format())
##         tree.branch.repository.add_fallback_repository(base.branch.repository)
##         trans = tree.branch.repository.bzrdir.get_repository_transport(None)
##         # This test could be a little cheaper by replacing the packs
##         # attribute on the repository to allow a different pack distribution
##         # and max packs policy - so we are checking the policy is honoured
##         # in the test. But for now 11 commits is not a big deal in a single
##         # test.
##         for x in range(9):
##             tree.commit('commit %s' % x)
##         # there should be 9 packs:
##         index = GraphIndex(trans, 'pack-names', None)
##         self.assertEqual(9, len(list(index.iter_all_entries())))
##         # committing one more should coalesce to 1 of 10.
##         tree.commit('commit triggering pack')
##         index = GraphIndex(trans, 'pack-names', None)
##         self.assertEqual(1, len(list(index.iter_all_entries())))
##         # packing should not damage data
##         tree = tree.bzrdir.open_workingtree()
##         check_result = tree.branch.repository.check(
##             [tree.branch.last_revision()])
##         # We should have 50 (10x5) files in the obsolete_packs directory.
##         obsolete_files = list(trans.list_dir('obsolete_packs'))
##         self.assertFalse('foo' in obsolete_files)
##         self.assertFalse('bar' in obsolete_files)
##         self.assertEqual(50, len(obsolete_files))
##         # XXX: Todo check packs obsoleted correctly - old packs and indices
##         # in the obsolete_packs directory.
##         large_pack_name = list(index.iter_all_entries())[0][1][0]
##         # finally, committing again should not touch the large pack.
##         tree.commit('commit not triggering pack')
##         index = GraphIndex(trans, 'pack-names', None)
##         self.assertEqual(2, len(list(index.iter_all_entries())))
##         pack_names = [node[1][0] for node in index.iter_all_entries()]
##         self.assertTrue(large_pack_name in pack_names)

def load_tests(basic_tests, module, test_loader):
    """Multiply the tests in this module across all pack-based formats.

    Each scenario supplies format_name, format_string and
    format_supports_external_lookups, which TestPackRepository reads as
    instance attributes.  The scenario name is the format name.
    """
    scenarios_params = [
         dict(format_name='pack-0.92',
              format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
              format_supports_external_lookups=False),
         dict(format_name='pack-0.92-subtree',
              format_string="Bazaar pack repository format 1 "
              "with subtree support (needs bzr 0.92)\n",
              format_supports_external_lookups=False),
         dict(format_name='1.6',
              format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='1.6-rich-root',
              format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
                  "(bzr 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='development0',
              format_string="Bazaar development format 0 "
                  "(needs bzr.dev from before 1.3)\n",
              format_supports_external_lookups=False),
         dict(format_name='development0-subtree',
              format_string="Bazaar development format 0 "
                  "with subtree support (needs bzr.dev from before 1.3)\n",
              format_supports_external_lookups=False),
         dict(format_name='development',
              format_string="Bazaar development format 1 "
                  "(needs bzr.dev from before 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='development-subtree',
              format_string="Bazaar development format 1 "
                  "with subtree support (needs bzr.dev from before 1.6)\n",
              format_supports_external_lookups=True),
         ]
    adapter = tests.TestScenarioApplier()
    # name of the scenario is the format name
    adapter.scenarios = [(s['format_name'], s) for s in scenarios_params]
    suite = tests.TestSuite()
    tests.adapt_tests(basic_tests, adapter, suite)
    return suite