/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar
3582.3.1 by Martin Pool
Split pack repository tests into their own file and use scenarios
1
# Copyright (C) 2008 Canonical Ltd
2
#
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
7
#
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11
# GNU General Public License for more details.
12
#
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16
17
"""Tests for pack repositories.
18
19
These tests are repeated for all pack-based repository formats.
20
"""
21
3582.3.4 by Martin Pool
Use cStringIO rather than StringIO
22
from cStringIO import StringIO
3582.3.1 by Martin Pool
Split pack repository tests into their own file and use scenarios
23
from stat import S_ISDIR
24
25
from bzrlib.index import GraphIndex, InMemoryGraphIndex
26
from bzrlib import (
27
    bzrdir,
28
    errors,
29
    inventory,
30
    progress,
31
    repository,
32
    revision as _mod_revision,
33
    symbol_versioning,
34
    tests,
35
    ui,
36
    upgrade,
37
    workingtree,
38
    )
39
from bzrlib.tests import (
40
    TestCase,
41
    TestCaseWithTransport,
3582.3.3 by Martin Pool
Reenable tests for stacking pack repositories
42
    TestNotApplicable,
3582.3.1 by Martin Pool
Split pack repository tests into their own file and use scenarios
43
    TestSkipped,
44
    )
45
from bzrlib.transport import (
46
    fakenfs,
47
    get_transport,
48
    )
49
50
51
class TestPackRepository(TestCaseWithTransport):
    """Tests to be repeated across all pack-based formats.

    The following are populated from the test scenario:

    :ivar format_name: Registered name of the format to test.
    :ivar format_string: On-disk format marker.
    :ivar format_supports_external_lookups: Boolean.
    """

    def get_format(self):
        """Return the BzrDirFormat for this scenario's format name."""
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_attribute__fetch_order(self):
        """Packs do not need ordered data retrieval.

        Except experience shows they need ordered data insertion, so for now,
        they request topological.
        """
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual('topological', repo._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Packs reuse deltas."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual(True, repo._fetch_uses_deltas)

    def test_disk_layout(self):
        """A freshly created pack repository has the expected on-disk files."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.check_databases(t)

    def check_format(self, t):
        """Check the on-disk format marker matches the scenario."""
        self.assertEqualDiff(
            self.format_string, # from scenario
            t.get('format').read())

    def assertHasNoKndx(self, t, knit_name):
        """Assert that knit_name has no index on t."""
        self.assertFalse(t.has(knit_name + '.kndx'))

    def assertHasNoKnit(self, t, knit_name):
        """Assert that knit_name has no knit data file on t."""
        # no default content
        self.assertFalse(t.has(knit_name + '.knit'))

    def check_databases(self, t):
        """check knit content for a repository."""
        # check conversion worked
        self.assertHasNoKndx(t, 'inventory')
        self.assertHasNoKnit(t, 'inventory')
        self.assertHasNoKndx(t, 'revisions')
        self.assertHasNoKnit(t, 'revisions')
        self.assertHasNoKndx(t, 'signatures')
        self.assertHasNoKnit(t, 'signatures')
        self.assertFalse(t.has('knits'))
        # revision-indexes file-container directory
        self.assertEqual([],
            list(GraphIndex(t, 'pack-names', None).iter_all_entries()))
        self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
        self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
        self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
        self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))

    def test_shared_disk_layout(self):
        """A shared repository also carries the shared-storage marker."""
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        # we want:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.check_databases(t)

    def test_shared_no_tree_disk_layout(self):
        """Toggling working trees adds/removes the no-working-trees marker."""
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        repo.set_make_working_trees(False)
        # we want:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        # We should have a marker for the no-working-trees flag.
        self.assertEqualDiff('', t.get('no-working-trees').read())
        # The marker should go when we toggle the setting.
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.check_databases(t)

    def test_adding_revision_creates_pack_indices(self):
        """A commit writes one pack whose index sizes match the named files."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
        tree.commit('foobarbaz')
        index = GraphIndex(trans, 'pack-names', None)
        index_nodes = list(index.iter_all_entries())
        self.assertEqual(1, len(index_nodes))
        node = index_nodes[0]
        name = node[1][0]
        # the pack sizes should be listed in the index
        pack_value = node[2]
        sizes = [int(digits) for digits in pack_value.split(' ')]
        for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
            stat = trans.stat('indices/%s%s' % (name, suffix))
            self.assertEqual(size, stat.st_size)

    def test_pulling_nothing_leads_to_no_new_names(self):
        """A no-op fetch must not create any pack in the target."""
        format = self.get_format()
        tree1 = self.make_branch_and_tree('1', format=format)
        tree2 = self.make_branch_and_tree('2', format=format)
        tree1.branch.repository.fetch(tree2.branch.repository)
        trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))

    def test_commit_across_pack_shape_boundary_autopacks(self):
        """The 10th commit autopacks the 9 existing packs into one."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # insert some files in obsolete_packs which should be removed by pack.
        trans.put_bytes('obsolete_packs/foo', '123')
        trans.put_bytes('obsolete_packs/bar', '321')
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10x5) files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = fakenfs.FakeNFSServer()
        server.setUp()
        self.addCleanup(server.tearDown)
        transport = get_transport(server.get_url())
        # NOTE: use 'control', not 'bzrdir', to avoid shadowing the imported
        # bzrdir module.
        control = format.initialize_on_transport(transport)
        repo = control.create_repository()
        repo_transport = control.get_repository_transport(None)
        self.assertTrue(repo_transport.has('obsolete_packs'))
        # these files are in use by another client and typically can't be deleted
        repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
        repo._pack_collection._clear_obsolete_packs()
        self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))

    def test_pack_after_two_commits_packs_everything(self):
        """An explicit pack() collapses all packs into a single pack."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start')
        tree.commit('more work')
        tree.branch.repository.pack()
        # there should be 1 pack:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_layout(self):
        """Newer revisions are stored earlier in the packed file."""
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start', rev_id='1')
        tree.commit('more work', rev_id='2')
        tree.branch.repository.pack()
        tree.lock_read()
        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            if key == ('1',):
                pos_1 = int(val[1:].split()[0])
            else:
                pos_2 = int(val[1:].split()[0])
        self.assertTrue(pos_2 < pos_1)

    def test_pack_repositories_support_multiple_write_locks(self):
        """Two clients may hold write locks on a pack repo concurrently."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        self.addCleanup(r1.unlock)
        r2.lock_write()
        r2.unlock()

    def _add_text(self, repo, fileid):
        """Add a text to the repository within a write group."""
        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])

    def test_concurrent_writers_merge_new_packs(self):
        """Two overlapping write groups both land in pack-names."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1.start_write_group()
                try:
                    r2.start_write_group()
                    try:
                        self._add_text(r1, 'fileidr1')
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1.abort_write_group()
                    raise
                # both r1 and r2 have open write groups with data in them
                # created while the other's write group was open.
                # Commit both which requires a merge to the pack-names.
                try:
                    r1.commit_write_group()
                except:
                    r1.abort_write_group()
                    r2.abort_write_group()
                    raise
                r2.commit_write_group()
                # tell r1 to reload from disk
                r1._pack_collection.reset()
                # Now both repositories should know about both names
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(2, len(r1._pack_collection.names()))
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_concurrent_writer_second_preserves_dropping_a_pack(self):
        """A concurrent pack-names merge must not resurrect a dropped pack."""
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        # add a pack to drop
        r1.lock_write()
        try:
            r1.start_write_group()
            try:
                self._add_text(r1, 'fileidr1')
            except:
                r1.abort_write_group()
                raise
            else:
                r1.commit_write_group()
            r1._pack_collection.ensure_loaded()
            name_to_drop = r1._pack_collection.all_packs()[0].name
        finally:
            r1.unlock()
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1._pack_collection.ensure_loaded()
                try:
                    r2.start_write_group()
                    try:
                        # in r1, drop the pack
                        r1._pack_collection._remove_pack_from_memory(
                            r1._pack_collection.get_pack_by_name(name_to_drop))
                        # in r2, add a pack
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1._pack_collection.reset()
                    raise
                # r1 has a changed names list, and r2 an open write groups with
                # changes.
                # save r1, and then commit the r2 write group, which requires a
                # merge to the pack-names, which should not reinstate
                # name_to_drop
                try:
                    r1._pack_collection._save_pack_names()
                    r1._pack_collection.reset()
                except:
                    r2.abort_write_group()
                    raise
                try:
                    r2.commit_write_group()
                except:
                    r2.abort_write_group()
                    raise
                # Now both repositories should know about just one name.
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(1, len(r1._pack_collection.names()))
                self.assertFalse(name_to_drop in r1._pack_collection.names())
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_lock_write_does_not_physically_lock(self):
        """Pack repos use in-memory write locks, not on-disk ones."""
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.addCleanup(repo.unlock)
        self.assertFalse(repo.get_physical_lock_status())

    def prepare_for_break_lock(self):
        # Setup the global ui factory state so that a break-lock method call
        # will find usable input in the input stream.
        old_factory = ui.ui_factory
        def restoreFactory():
            ui.ui_factory = old_factory
        self.addCleanup(restoreFactory)
        ui.ui_factory = ui.SilentUIFactory()
        ui.ui_factory.stdin = StringIO("y\n")

    def test_break_lock_breaks_physical_lock(self):
        """break_lock releases the physical pack-names lock."""
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        repo2 = repository.Repository.open('.')
        self.assertTrue(repo.get_physical_lock_status())
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertFalse(repo.get_physical_lock_status())

    def test_broken_physical_locks_error_on__unlock_names_lock(self):
        """Unlocking a lock broken elsewhere raises LockBroken."""
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        self.assertTrue(repo.get_physical_lock_status())
        repo2 = repository.Repository.open('.')
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)

    def test_fetch_without_find_ghosts_ignores_ghosts(self):
        # we want two repositories at this point:
        # one with a revision that is a ghost in the other
        # repository.
        # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
        # 'references' is present in both repositories, and 'tip' is present
        # just in has_ghost.
        # has_ghost       missing_ghost
        #------------------------------
        # 'ghost'             -
        # 'references'    'references'
        # 'tip'               -
        # In this test we fetch 'tip' which should not fetch 'ghost'
        has_ghost = self.make_repository('has_ghost', format=self.get_format())
        missing_ghost = self.make_repository('missing_ghost',
            format=self.get_format())

        def add_commit(repo, revision_id, parent_ids):
            repo.lock_write()
            repo.start_write_group()
            inv = inventory.Inventory(revision_id=revision_id)
            inv.root.revision = revision_id
            root_id = inv.root.file_id
            sha1 = repo.add_inventory(revision_id, inv, [])
            repo.texts.add_lines((root_id, revision_id), [], [])
            rev = _mod_revision.Revision(timestamp=0,
                                         timezone=None,
                                         committer="Foo Bar <foo@example.com>",
                                         message="Message",
                                         inventory_sha1=sha1,
                                         revision_id=revision_id)
            rev.parent_ids = parent_ids
            repo.add_revision(revision_id, rev)
            repo.commit_write_group()
            repo.unlock()
        add_commit(has_ghost, 'ghost', [])
        add_commit(has_ghost, 'references', ['ghost'])
        add_commit(missing_ghost, 'references', ['ghost'])
        add_commit(has_ghost, 'tip', ['references'])
        missing_ghost.fetch(has_ghost, 'tip')
        # missing ghost now has tip and not ghost.
        rev = missing_ghost.get_revision('tip')
        inv = missing_ghost.get_inventory('tip')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_revision, 'ghost')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_inventory, 'ghost')

    def test_supports_external_lookups(self):
        """The format's external-lookups flag matches the scenario."""
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
            repo._format.supports_external_lookups)
3582.3.3 by Martin Pool
Reenable tests for stacking pack repositories
489
class TestPackRepositoryStacking(TestCaseWithTransport):

    """Tests for stacking pack repositories"""

    def setUp(self):
        """Skip the whole scenario when the format cannot stack."""
        if not self.format_supports_external_lookups:
            raise TestNotApplicable("%r doesn't support stacking"
                % (self.format_name,))
        super(TestPackRepositoryStacking, self).setUp()

    def get_format(self):
        """Return the BzrDirFormat for this scenario's format name."""
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_stack_checks_compatibility(self):
        # early versions of the packing code relied on pack internals to
        # stack, but the current version should be able to stack on any
        # format.
        #
        # TODO: Possibly this should be run per-repository-format and raise
        # TestNotApplicable on formats that don't support stacking. -- mbp
        # 20080729
        repo = self.make_repository('repo', format=self.get_format())
        if repo.supports_rich_root():
            # can only stack on repositories that have compatible internal
            # metadata
            matching_format_name = 'pack-0.92-subtree'
            mismatching_format_name = 'pack-0.92'
        else:
            matching_format_name = 'pack-0.92'
            mismatching_format_name = 'pack-0.92-subtree'
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'KnitPackRepository.*/repo/.*\n'
            r'different rich-root support')

    def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        referencing = self.make_branch_and_tree('repo', format=self.get_format())
        referencing.branch.repository.add_fallback_repository(base.branch.repository)
        referencing.commit('bar')
        new_instance = referencing.bzrdir.open_repository()
        new_instance.lock_read()
        self.addCleanup(new_instance.unlock)
        new_instance._pack_collection.ensure_loaded()
        self.assertEqual(1, len(new_instance._pack_collection.all_packs()))

    def test_autopack_only_considers_main_repo_packs(self):
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        tree = self.make_branch_and_tree('repo', format=self.get_format())
        tree.branch.repository.add_fallback_repository(base.branch.repository)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10x5) files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)
3582.3.1 by Martin Pool
Split pack repository tests into their own file and use scenarios
581
582
583
def load_tests(basic_tests, module, test_loader):
    """Multiply this module's tests across all pack-based format scenarios.

    Each scenario supplies format_name (the bzrdir canned format name),
    format_string (the repository on-disk format string) and
    format_supports_external_lookups, which the test classes read as
    instance attributes.
    """
    # these give the bzrdir canned format name, and the repository on-disk
    # format string
    scenarios_params = [
         dict(format_name='pack-0.92',
              format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
              format_supports_external_lookups=False),
         dict(format_name='pack-0.92-subtree',
              format_string="Bazaar pack repository format 1 "
              "with subtree support (needs bzr 0.92)\n",
              format_supports_external_lookups=False),
         dict(format_name='1.6',
              format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='1.6-rich-root',
              format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
                  "(bzr 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='development0',
              format_string="Bazaar development format 0 "
                  "(needs bzr.dev from before 1.3)\n",
              format_supports_external_lookups=False),
         dict(format_name='development0-subtree',
              format_string="Bazaar development format 0 "
                  "with subtree support (needs bzr.dev from before 1.3)\n",
              format_supports_external_lookups=False),
         dict(format_name='development',
              format_string="Bazaar development format 1 "
                  "(needs bzr.dev from before 1.6)\n",
              format_supports_external_lookups=True),
         dict(format_name='development-subtree',
              format_string="Bazaar development format 1 "
                  "with subtree support (needs bzr.dev from before 1.6)\n",
              format_supports_external_lookups=True),
         ]
    adapter = tests.TestScenarioApplier()
    # name of the scenario is the format name
    adapter.scenarios = [(s['format_name'], s) for s in scenarios_params]
    suite = tests.TestSuite()
    tests.adapt_tests(basic_tests, adapter, suite)
    return suite