/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar
3582.3.1 by Martin Pool
Split pack repository tests into their own file and use scenarios
1
# Copyright (C) 2008 Canonical Ltd
2
#
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
7
#
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11
# GNU General Public License for more details.
12
#
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16
17
"""Tests for pack repositories.
18
19
These tests are repeated for all pack-based repository formats.
20
"""
21
22
from StringIO import StringIO
23
from stat import S_ISDIR
24
25
from bzrlib.index import GraphIndex, InMemoryGraphIndex
26
from bzrlib import (
27
    bzrdir,
28
    errors,
29
    inventory,
30
    progress,
31
    repository,
32
    revision as _mod_revision,
33
    symbol_versioning,
34
    tests,
35
    ui,
36
    upgrade,
37
    workingtree,
38
    )
39
from bzrlib.tests import (
40
    TestCase,
41
    TestCaseWithTransport,
3582.3.3 by Martin Pool
Reenable tests for stacking pack repositories
42
    TestNotApplicable,
3582.3.1 by Martin Pool
Split pack repository tests into their own file and use scenarios
43
    TestSkipped,
44
    )
45
from bzrlib.transport import (
46
    fakenfs,
47
    get_transport,
48
    )
49
50
51
class TestPackRepository(TestCaseWithTransport):
52
    """Tests to be repeated across all pack-based formats.
53
54
    The following are populated from the test scenario:
55
56
    :ivar format_name: Registered name fo the format to test.
57
    :ivar format_string: On-disk format marker.
58
    :ivar format_supports_external_lookups: Boolean.
59
    """
60
61
    def get_format(self):
        """Build and return the BzrDirFormat named by this scenario."""
        registry = bzrdir.format_registry
        return registry.make_bzrdir(self.format_name)

    def test_attribute__fetch_order(self):
        """Packs do not need ordered data retrieval."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual('unsorted', repo._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Packs reuse deltas."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual(True, repo._fetch_uses_deltas)

    def test_disk_layout(self):
        """A new pack repository has the expected files and directories."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.check_databases(t)

    def check_format(self, t):
        """Assert that the on-disk 'format' file matches the scenario marker."""
        actual = t.get('format').read()
        self.assertEqualDiff(
            self.format_string, # from scenario
            actual)

    def assertHasNoKndx(self, t, knit_name):
        """Assert that knit_name has no index on t."""
        self.assertFalse(t.has('%s.kndx' % knit_name))

    def assertHasNoKnit(self, t, knit_name):
        """Assert that no .knit file for knit_name exists on t."""
        # no default content
        self.assertFalse(t.has('%s.knit' % knit_name))

    def check_databases(self, t):
        """Check the storage layout of a pack repository on transport t.

        Verifies there are no legacy knit files left over, that the
        pack-names index is empty, and that the four pack directories exist.
        """
        # check conversion worked
        self.assertHasNoKndx(t, 'inventory')
        self.assertHasNoKnit(t, 'inventory')
        self.assertHasNoKndx(t, 'revisions')
        self.assertHasNoKnit(t, 'revisions')
        self.assertHasNoKndx(t, 'signatures')
        self.assertHasNoKnit(t, 'signatures')
        self.assertFalse(t.has('knits'))
        # revision-indexes file-container directory
        self.assertEqual([],
            list(GraphIndex(t, 'pack-names', None).iter_all_entries()))
        self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
        self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
        self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
        self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))

    def test_shared_disk_layout(self):
        """A shared repository writes a 'shared-storage' marker file."""
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        # we want:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.check_databases(t)

    def test_shared_no_tree_disk_layout(self):
        """Disabling working trees writes (and toggling removes) a marker."""
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        repo.set_make_working_trees(False)
        # we want:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        # We should have a marker for the no-working-trees flag.
        self.assertEqualDiff('', t.get('no-working-trees').read())
        # The marker should go when we toggle the setting.
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.check_databases(t)

    def test_adding_revision_creates_pack_indices(self):
        """Committing a revision writes one pack and its per-pack indices.

        The pack-names index entry records the sizes of the four index
        files (.rix/.iix/.tix/.six), which must match what is on disk.
        """
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
        tree.commit('foobarbaz')
        index = GraphIndex(trans, 'pack-names', None)
        index_nodes = list(index.iter_all_entries())
        self.assertEqual(1, len(index_nodes))
        node = index_nodes[0]
        name = node[1][0]
        # the pack sizes should be listed in the index
        pack_value = node[2]
        sizes = [int(digits) for digits in pack_value.split(' ')]
        for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
            stat = trans.stat('indices/%s%s' % (name, suffix))
            self.assertEqual(size, stat.st_size)

    def test_pulling_nothing_leads_to_no_new_names(self):
        """Fetching from an empty repository creates no packs."""
        format = self.get_format()
        tree1 = self.make_branch_and_tree('1', format=format)
        tree2 = self.make_branch_and_tree('2', format=format)
        tree1.branch.repository.fetch(tree2.branch.repository)
        trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))

    def test_commit_across_pack_shape_boundary_autopacks(self):
        """The tenth commit triggers an autopack down to a single pack."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # insert some files in obsolete_packs which should be removed by pack.
        trans.put_bytes('obsolete_packs/foo', '123')
        trans.put_bytes('obsolete_packs/bar', '321')
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10 packs x 5 files each) in obsolete_packs,
        # and the foo/bar junk we planted must have been cleaned out.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

    def test_fail_obsolete_deletion(self):
        """Failing to delete obsolete packs is not fatal.

        On NFS-like transports, files still open by another client (the
        '.nfs*' silly-rename files) cannot be deleted; clearing obsolete
        packs must tolerate that rather than raise.
        """
        format = self.get_format()
        server = fakenfs.FakeNFSServer()
        server.setUp()
        self.addCleanup(server.tearDown)
        transport = get_transport(server.get_url())
        # Reuse the format computed above (the original recomputed it), and
        # do not shadow the imported bzrlib 'bzrdir' module with a local.
        control = format.initialize_on_transport(transport)
        repo = control.create_repository()
        repo_transport = control.get_repository_transport(None)
        self.assertTrue(repo_transport.has('obsolete_packs'))
        # these files are in use by another client and typically can't be deleted
        repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
        repo._pack_collection._clear_obsolete_packs()
        self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))

    def test_pack_after_two_commits_packs_everything(self):
        """An explicit pack() call coalesces all packs into one."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start')
        tree.commit('more work')
        tree.branch.repository.pack()
        # there should be 1 pack:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_layout(self):
        """After pack(), newer revisions are stored before older ones."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start', rev_id='1')
        tree.commit('more work', rev_id='2')
        tree.branch.repository.pack()
        tree.lock_read()
        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            # val looks like '<flag><offset> <length>'; strip the leading
            # flag byte and take the offset.
            if key == ('1',):
                pos_1 = int(val[1:].split()[0])
            else:
                pos_2 = int(val[1:].split()[0])
        # revision 2 (the tip) must appear at a lower offset than revision 1.
        self.assertTrue(pos_2 < pos_1)

    def test_pack_repositories_support_multiple_write_locks(self):
        """Two repository instances can hold write locks concurrently."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        self.addCleanup(r1.unlock)
        # r2 can take and release its own write lock while r1 still holds one.
        r2.lock_write()
        r2.unlock()

    def _add_text(self, repo, fileid):
        """Add a text to the repository within a write group."""
        key = (fileid, 'samplerev+' + fileid)
        repo.texts.add_lines(key, [], [])

    def test_concurrent_writers_merge_new_packs(self):
        """Two concurrent write groups both commit; pack-names merges both."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1.start_write_group()
                try:
                    r2.start_write_group()
                    try:
                        self._add_text(r1, 'fileidr1')
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1.abort_write_group()
                    raise
                # both r1 and r2 have open write groups with data in them
                # created while the other's write group was open.
                # Commit both which requires a merge to the pack-names.
                try:
                    r1.commit_write_group()
                except:
                    r1.abort_write_group()
                    r2.abort_write_group()
                    raise
                r2.commit_write_group()
                # tell r1 to reload from disk
                r1._pack_collection.reset()
                # Now both repositories should know about both names
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(2, len(r1._pack_collection.names()))
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_concurrent_writer_second_preserves_dropping_a_pack(self):
        """A concurrent commit must not resurrect a pack another writer dropped."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        # add a pack to drop
        r1.lock_write()
        try:
            r1.start_write_group()
            try:
                self._add_text(r1, 'fileidr1')
            except:
                r1.abort_write_group()
                raise
            else:
                r1.commit_write_group()
            r1._pack_collection.ensure_loaded()
            name_to_drop = r1._pack_collection.all_packs()[0].name
        finally:
            r1.unlock()
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1._pack_collection.ensure_loaded()
                try:
                    r2.start_write_group()
                    try:
                        # in r1, drop the pack
                        r1._pack_collection._remove_pack_from_memory(
                            r1._pack_collection.get_pack_by_name(name_to_drop))
                        # in r2, add a pack
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1._pack_collection.reset()
                    raise
                # r1 has a changed names list, and r2 an open write groups with
                # changes.
                # save r1, and then commit the r2 write group, which requires a
                # merge to the pack-names, which should not reinstate
                # name_to_drop
                try:
                    r1._pack_collection._save_pack_names()
                    r1._pack_collection.reset()
                except:
                    r2.abort_write_group()
                    raise
                try:
                    r2.commit_write_group()
                except:
                    r2.abort_write_group()
                    raise
                # Now both repositories should know about just one name.
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(1, len(r1._pack_collection.names()))
                self.assertFalse(name_to_drop in r1._pack_collection.names())
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_lock_write_does_not_physically_lock(self):
        """lock_write on a pack repository takes no on-disk lock."""
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.addCleanup(repo.unlock)
        self.assertFalse(repo.get_physical_lock_status())

    def prepare_for_break_lock(self):
        # Setup the global ui factory state so that a break-lock method call
        # will find usable input in the input stream.
        old_factory = ui.ui_factory
        def restoreFactory():
            ui.ui_factory = old_factory
        self.addCleanup(restoreFactory)
        ui.ui_factory = ui.SilentUIFactory()
        # Pre-load a 'y' answer for the break-lock confirmation prompt.
        ui.ui_factory.stdin = StringIO("y\n")

    def test_break_lock_breaks_physical_lock(self):
        """break_lock from a second instance releases the names lock."""
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        repo2 = repository.Repository.open('.')
        self.assertTrue(repo.get_physical_lock_status())
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertFalse(repo.get_physical_lock_status())

    def test_broken_physical_locks_error_on__unlock_names_lock(self):
        """Unlocking a names lock broken elsewhere raises LockBroken."""
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        self.assertTrue(repo.get_physical_lock_status())
        repo2 = repository.Repository.open('.')
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)

    def test_fetch_without_find_ghosts_ignores_ghosts(self):
        """Fetching a revision does not fetch its ghost ancestors."""
        # we want two repositories at this point:
        # one with a revision that is a ghost in the other
        # repository.
        # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
        # 'references' is present in both repositories, and 'tip' is present
        # just in has_ghost.
        # has_ghost       missing_ghost
        #------------------------------
        # 'ghost'             -
        # 'references'    'references'
        # 'tip'               -
        # In this test we fetch 'tip' which should not fetch 'ghost'
        has_ghost = self.make_repository('has_ghost', format=self.get_format())
        missing_ghost = self.make_repository('missing_ghost',
            format=self.get_format())

        def add_commit(repo, revision_id, parent_ids):
            # Hand-build a minimal revision: empty inventory, root text,
            # and a Revision object with the given parents.
            repo.lock_write()
            repo.start_write_group()
            inv = inventory.Inventory(revision_id=revision_id)
            inv.root.revision = revision_id
            root_id = inv.root.file_id
            sha1 = repo.add_inventory(revision_id, inv, [])
            repo.texts.add_lines((root_id, revision_id), [], [])
            rev = _mod_revision.Revision(timestamp=0,
                                         timezone=None,
                                         committer="Foo Bar <foo@example.com>",
                                         message="Message",
                                         inventory_sha1=sha1,
                                         revision_id=revision_id)
            rev.parent_ids = parent_ids
            repo.add_revision(revision_id, rev)
            repo.commit_write_group()
            repo.unlock()
        add_commit(has_ghost, 'ghost', [])
        add_commit(has_ghost, 'references', ['ghost'])
        add_commit(missing_ghost, 'references', ['ghost'])
        add_commit(has_ghost, 'tip', ['references'])
        missing_ghost.fetch(has_ghost, 'tip')
        # missing ghost now has tip and not ghost.
        rev = missing_ghost.get_revision('tip')
        inv = missing_ghost.get_inventory('tip')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_revision, 'ghost')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_inventory, 'ghost')

    def test_supports_external_lookups(self):
        """The format's stacking capability matches the scenario flag."""
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(
            self.format_supports_external_lookups,
            repo._format.supports_external_lookups)


class TestPackRepositoryStacking(TestCaseWithTransport):
486
487
    """Tests for stacking pack repositories"""
488
489
    def setUp(self):
        # Scenario attributes are applied by the test adapter before setUp
        # runs, so we can skip non-stacking formats up front.
        if not self.format_supports_external_lookups:
            raise TestNotApplicable("%r doesn't support stacking"
                % (self.format_name,))
        super(TestPackRepositoryStacking, self).setUp()

    def get_format(self):
        """Build and return the BzrDirFormat named by this scenario."""
        registry = bzrdir.format_registry
        return registry.make_bzrdir(self.format_name)

    def test_stack_checks_compatibility(self):
        """Stacking is refused across incompatible rich-root support."""
        # early versions of the packing code relied on pack internals to
        # stack, but the current version should be able to stack on any
        # format.
        #
        # TODO: Possibly this should be run per-repository-format and raise
        # TestNotApplicable on formats that don't support stacking. -- mbp
        # 20080729
        repo = self.make_repository('repo', format=self.get_format())
        if repo.supports_rich_root():
            # can only stack on repositories that have compatible internal
            # metadata
            matching_format_name = 'pack-0.92-subtree'
            mismatching_format_name = 'pack-0.92'
        else:
            matching_format_name = 'pack-0.92'
            mismatching_format_name = 'pack-0.92-subtree'
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'KnitPackRepository.*/repo/.*\n'
            r'different rich-root support')

    def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
        """pack-names lists only this repo's packs, not its fallback's."""
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        referencing = self.make_branch_and_tree('repo', format=self.get_format())
        referencing.branch.repository.add_fallback_repository(base.branch.repository)
        referencing.commit('bar')
        new_instance = referencing.bzrdir.open_repository()
        new_instance.lock_read()
        self.addCleanup(new_instance.unlock)
        new_instance._pack_collection.ensure_loaded()
        # only the pack from 'bar'; the fallback's pack must not appear.
        self.assertEqual(1, len(new_instance._pack_collection.all_packs()))

    def test_autopack_only_considers_main_repo_packs(self):
        """Autopack counts only the stacked repo's own packs, not fallbacks'."""
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        tree = self.make_branch_and_tree('repo', format=self.get_format())
        tree.branch.repository.add_fallback_repository(base.branch.repository)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10x5) files in the obsolete_packs directory.
        # NOTE(review): the foo/bar assertions below mirror the non-stacked
        # autopack test; no such files are planted here, so they are
        # trivially absent.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

def load_tests(basic_tests, module, test_loader):
    """Multiply the tests in this module across all pack format scenarios.

    Each scenario sets format_name (the bzrdir canned format name),
    format_string (the repository's on-disk format marker) and
    format_supports_external_lookups on the generated test cases.
    """
    # these give the bzrdir canned format name, and the repository on-disk
    # format string
    scenarios_params = [
         dict(format_name='pack-0.92',
              format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
              format_supports_external_lookups=False),
         dict(format_name='pack-0.92-subtree',
              format_string="Bazaar pack repository format 1 "
              "with subtree support (needs bzr 0.92)\n",
              format_supports_external_lookups=False),
         dict(format_name='1.6',
              format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='1.6-rich-root',
              format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
                  "(bzr 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='development0',
              format_string="Bazaar development format 0 "
                  "(needs bzr.dev from before 1.3)\n",
              format_supports_external_lookups=False),
         dict(format_name='development0-subtree',
              format_string="Bazaar development format 0 "
                  "with subtree support (needs bzr.dev from before 1.3)\n",
              format_supports_external_lookups=False),
         dict(format_name='development',
              format_string="Bazaar development format 1 "
                  "(needs bzr.dev from before 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='development-subtree',
              format_string="Bazaar development format 1 "
                  "with subtree support (needs bzr.dev from before 1.6)\n",
              format_supports_external_lookups=True),
         ]
    adapter = tests.TestScenarioApplier()
    # name of the scenario is the format name
    adapter.scenarios = [(s['format_name'], s) for s in scenarios_params]
    suite = tests.TestSuite()
    tests.adapt_tests(basic_tests, adapter, suite)
    return suite