# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

"""Tests for pack repositories.

These tests are repeated for all pack-based repository formats.
"""

from cStringIO import StringIO
from stat import S_ISDIR

from bzrlib.index import GraphIndex, InMemoryGraphIndex
from bzrlib import (
    bzrdir,
    errors,
    inventory,
    progress,
    repository,
    revision as _mod_revision,
    symbol_versioning,
    tests,
    ui,
    upgrade,
    workingtree,
    )
from bzrlib.tests import (
    TestCase,
    TestCaseWithTransport,
    TestNotApplicable,
    TestSkipped,
    )
from bzrlib.transport import (
    fakenfs,
    get_transport,
    )


class TestPackRepository(TestCaseWithTransport):
    """Tests to be repeated across all pack-based formats.

    The following are populated from the test scenario:

    :ivar format_name: Registered name of the format to test.
    :ivar format_string: On-disk format marker.
    :ivar format_supports_external_lookups: Boolean.
    """

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_attribute__fetch_order(self):
        """Packs do not need ordered data retrieval."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual('unordered', repo._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Packs reuse deltas."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual(True, repo._fetch_uses_deltas)

    def test_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.check_databases(t)

    def check_format(self, t):
        self.assertEqualDiff(
            self.format_string, # from scenario
            t.get('format').read())

    def assertHasNoKndx(self, t, knit_name):
        """Assert that knit_name has no index on t."""
        self.assertFalse(t.has(knit_name + '.kndx'))

    def assertHasNoKnit(self, t, knit_name):
        """Assert that knit_name has no knit on t."""
        # no default content
        self.assertFalse(t.has(knit_name + '.knit'))

    def check_databases(self, t):
        """Check knit content for a repository."""
        # check conversion worked
        self.assertHasNoKndx(t, 'inventory')
        self.assertHasNoKnit(t, 'inventory')
        self.assertHasNoKndx(t, 'revisions')
        self.assertHasNoKnit(t, 'revisions')
        self.assertHasNoKndx(t, 'signatures')
        self.assertHasNoKnit(t, 'signatures')
        self.assertFalse(t.has('knits'))
        # revision-indexes file-container directory
        self.assertEqual([],
            list(GraphIndex(t, 'pack-names', None).iter_all_entries()))
        self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
        self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
        self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
        self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))

    def test_shared_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        # we want:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.check_databases(t)

    def test_shared_no_tree_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        repo.set_make_working_trees(False)
        # we want:
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        # We should have a marker for the no-working-trees flag.
        self.assertEqualDiff('', t.get('no-working-trees').read())
        # The marker should go when we toggle the setting.
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.check_databases(t)

    def test_adding_revision_creates_pack_indices(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
        tree.commit('foobarbaz')
        index = GraphIndex(trans, 'pack-names', None)
        index_nodes = list(index.iter_all_entries())
        self.assertEqual(1, len(index_nodes))
        node = index_nodes[0]
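        # each entry is a tuple of (index, key, value, ...); the key is a
        # 1-tuple holding the pack's name.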
        name = node[1][0]
        # the pack sizes should be listed in the index
        pack_value = node[2]
        sizes = [int(digits) for digits in pack_value.split(' ')]
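        # the four sizes should match the revision, inventory, text and
        # signature index files (.rix, .iix, .tix, .six) beside the pack.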
        for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
            stat = trans.stat('indices/%s%s' % (name, suffix))
            self.assertEqual(size, stat.st_size)

    def test_pulling_nothing_leads_to_no_new_names(self):
        format = self.get_format()
        tree1 = self.make_branch_and_tree('1', format=format)
        tree2 = self.make_branch_and_tree('2', format=format)
        tree1.branch.repository.fetch(tree2.branch.repository)
        trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))

    def test_commit_across_pack_shape_boundary_autopacks(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # insert some files in obsolete_packs which should be removed by pack.
        trans.put_bytes('obsolete_packs/foo', '123')
        trans.put_bytes('obsolete_packs/bar', '321')
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10x5) files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = fakenfs.FakeNFSServer()
        server.setUp()
        self.addCleanup(server.tearDown)
        transport = get_transport(server.get_url())
        bzrdir = self.get_format().initialize_on_transport(transport)
        repo = bzrdir.create_repository()
        repo_transport = bzrdir.get_repository_transport(None)
        self.assertTrue(repo_transport.has('obsolete_packs'))
        # these files are in use by another client and typically can't be deleted
        repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
        repo._pack_collection._clear_obsolete_packs()
        self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))

    def test_pack_after_two_commits_packs_everything(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start')
        tree.commit('more work')
        tree.branch.repository.pack()
        # there should be 1 pack:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_layout(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start', rev_id='1')
        tree.commit('more work', rev_id='2')
        tree.branch.repository.pack()
        tree.lock_read()
        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
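        # the first field of val (after its leading character) is the
        # record's position in the pack, so the tip revision '2' should
        # come before '1'.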
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            if key == ('1',):
                pos_1 = int(val[1:].split()[0])
            else:
                pos_2 = int(val[1:].split()[0])
        self.assertTrue(pos_2 < pos_1)

    def test_pack_repositories_support_multiple_write_locks(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        self.addCleanup(r1.unlock)
        r2.lock_write()
        r2.unlock()

    def _add_text(self, repo, fileid):
        """Add a text to the repository within a write group."""
        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])

    def test_concurrent_writers_merge_new_packs(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1.start_write_group()
                try:
                    r2.start_write_group()
                    try:
                        self._add_text(r1, 'fileidr1')
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1.abort_write_group()
                    raise
                # both r1 and r2 have open write groups with data in them
                # created while the other's write group was open.
                # Commit both which requires a merge to the pack-names.
                try:
                    r1.commit_write_group()
                except:
                    r1.abort_write_group()
                    r2.abort_write_group()
                    raise
                r2.commit_write_group()
                # tell r1 to reload from disk
                r1._pack_collection.reset()
                # Now both repositories should know about both names
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(2, len(r1._pack_collection.names()))
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_concurrent_writer_second_preserves_dropping_a_pack(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        # add a pack to drop
        r1.lock_write()
        try:
            r1.start_write_group()
            try:
                self._add_text(r1, 'fileidr1')
            except:
                r1.abort_write_group()
                raise
            else:
                r1.commit_write_group()
            r1._pack_collection.ensure_loaded()
            name_to_drop = r1._pack_collection.all_packs()[0].name
        finally:
            r1.unlock()
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1._pack_collection.ensure_loaded()
                try:
                    r2.start_write_group()
                    try:
                        # in r1, drop the pack
                        r1._pack_collection._remove_pack_from_memory(
                            r1._pack_collection.get_pack_by_name(name_to_drop))
                        # in r2, add a pack
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1._pack_collection.reset()
                    raise
                # r1 has a changed names list, and r2 has an open write group
                # with changes.
                # save r1, and then commit the r2 write group, which requires a
                # merge to the pack-names, which should not reinstate
                # name_to_drop
                try:
                    r1._pack_collection._save_pack_names()
                    r1._pack_collection.reset()
                except:
                    r2.abort_write_group()
                    raise
                try:
                    r2.commit_write_group()
                except:
                    r2.abort_write_group()
                    raise
                # Now both repositories should know about just one name.
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
                self.assertEqual(1, len(r1._pack_collection.names()))
                self.assertFalse(name_to_drop in r1._pack_collection.names())
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_lock_write_does_not_physically_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.addCleanup(repo.unlock)
        self.assertFalse(repo.get_physical_lock_status())

    def prepare_for_break_lock(self):
        # Set up the global ui factory state so that a break-lock method call
        # will find usable input in the input stream.
        old_factory = ui.ui_factory
        def restoreFactory():
            ui.ui_factory = old_factory
        self.addCleanup(restoreFactory)
        ui.ui_factory = ui.SilentUIFactory()
        ui.ui_factory.stdin = StringIO("y\n")
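        # the 'y' answers the confirmation prompt that break_lock issues.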

    def test_break_lock_breaks_physical_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        repo.control_files.leave_in_place()
        repo.unlock()
        repo2 = repository.Repository.open('.')
        self.assertTrue(repo.get_physical_lock_status())
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertFalse(repo.get_physical_lock_status())

    def test_broken_physical_locks_error_on__unlock_names_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        self.assertTrue(repo.get_physical_lock_status())
        repo2 = repository.Repository.open('.')
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)

    def test_fetch_without_find_ghosts_ignores_ghosts(self):
        # we want two repositories at this point:
        # one with a revision that is a ghost in the other
        # repository.
        # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
        # 'references' is present in both repositories, and 'tip' is present
        # just in has_ghost.
        # has_ghost       missing_ghost
        #------------------------------
        # 'ghost'             -
        # 'references'    'references'
        # 'tip'               -
        # In this test we fetch 'tip' which should not fetch 'ghost'
        has_ghost = self.make_repository('has_ghost', format=self.get_format())
        missing_ghost = self.make_repository('missing_ghost',
            format=self.get_format())

        def add_commit(repo, revision_id, parent_ids):
            repo.lock_write()
            repo.start_write_group()
            inv = inventory.Inventory(revision_id=revision_id)
            inv.root.revision = revision_id
            root_id = inv.root.file_id
            sha1 = repo.add_inventory(revision_id, inv, [])
            repo.texts.add_lines((root_id, revision_id), [], [])
            rev = _mod_revision.Revision(timestamp=0,
                                         timezone=None,
                                         committer="Foo Bar <foo@example.com>",
                                         message="Message",
                                         inventory_sha1=sha1,
                                         revision_id=revision_id)
            rev.parent_ids = parent_ids
            repo.add_revision(revision_id, rev)
            repo.commit_write_group()
            repo.unlock()
        add_commit(has_ghost, 'ghost', [])
        add_commit(has_ghost, 'references', ['ghost'])
        add_commit(missing_ghost, 'references', ['ghost'])
        add_commit(has_ghost, 'tip', ['references'])
        missing_ghost.fetch(has_ghost, 'tip')
        # missing ghost now has tip and not ghost.
        rev = missing_ghost.get_revision('tip')
        inv = missing_ghost.get_inventory('tip')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_revision, 'ghost')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_inventory, 'ghost')

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
            repo._format.supports_external_lookups)


class TestPackRepositoryStacking(TestCaseWithTransport):

    """Tests for stacking pack repositories"""

    def setUp(self):
        if not self.format_supports_external_lookups:
            raise TestNotApplicable("%r doesn't support stacking"
                % (self.format_name,))
        super(TestPackRepositoryStacking, self).setUp()

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_stack_checks_rich_root_compatibility(self):
        # early versions of the packing code relied on pack internals to
        # stack, but the current version should be able to stack on any
        # format.
        #
        # TODO: Possibly this should be run per-repository-format and raise
        # TestNotApplicable on formats that don't support stacking. -- mbp
        # 20080729
        repo = self.make_repository('repo', format=self.get_format())
        if repo.supports_rich_root():
            # can only stack on repositories that have compatible internal
            # metadata
            if getattr(repo._format, 'supports_tree_reference', False):
                matching_format_name = 'pack-0.92-subtree'
            else:
                matching_format_name = 'rich-root-pack'
            mismatching_format_name = 'pack-0.92'
        else:
            matching_format_name = 'pack-0.92'
            mismatching_format_name = 'pack-0.92-subtree'
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'KnitPackRepository.*/repo/.*\n'
            r'different rich-root support')

    def test_stack_checks_serializers_compatibility(self):
        repo = self.make_repository('repo', format=self.get_format())
        if getattr(repo._format, 'supports_tree_reference', False):
            # can only stack on repositories that have compatible internal
            # metadata
            matching_format_name = 'pack-0.92-subtree'
            mismatching_format_name = 'rich-root-pack'
        else:
            if repo.supports_rich_root():
                matching_format_name = 'rich-root-pack'
                mismatching_format_name = 'pack-0.92-subtree'
            else:
                raise TestNotApplicable('No formats use non-v5 serializer'
                    ' without having rich-root also set')
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'KnitPackRepository.*/repo/.*\n'
            r'different serializers')

    def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        referencing = self.make_branch_and_tree('repo', format=self.get_format())
        referencing.branch.repository.add_fallback_repository(base.branch.repository)
        referencing.commit('bar')
        new_instance = referencing.bzrdir.open_repository()
        new_instance.lock_read()
        self.addCleanup(new_instance.unlock)
        new_instance._pack_collection.ensure_loaded()
        self.assertEqual(1, len(new_instance._pack_collection.all_packs()))

    def test_autopack_only_considers_main_repo_packs(self):
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        tree = self.make_branch_and_tree('repo', format=self.get_format())
        tree.branch.repository.add_fallback_repository(base.branch.repository)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        # We should have 50 (10x5) files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(50, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = GraphIndex(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)


def load_tests(basic_tests, module, test_loader):
    # these give the bzrdir canned format name, and the repository on-disk
    # format string
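    # Each dict below defines one scenario: TestScenarioApplier runs every
    # test in this module once per scenario, with these keys set as
    # attributes on the test instance.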
3582.3.1 by Martin Pool
Split pack repository tests into their own file and use scenarios
613
    scenarios_params = [
614
         dict(format_name='pack-0.92',
615
              format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
616
              format_supports_external_lookups=False),
617
         dict(format_name='pack-0.92-subtree',
618
              format_string="Bazaar pack repository format 1 "
619
              "with subtree support (needs bzr 0.92)\n",
620
              format_supports_external_lookups=False),
3582.3.2 by Martin Pool
Add 1.6 formats to pack repository tests
621
         dict(format_name='1.6',
622
              format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
623
              format_supports_external_lookups=True),
3606.10.5 by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root.
624
         dict(format_name='1.6.1-rich-root',
3582.3.2 by Martin Pool
Add 1.6 formats to pack repository tests
625
              format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
3606.10.5 by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root.
626
                  "(bzr 1.6.1)\n",
3582.3.2 by Martin Pool
Add 1.6 formats to pack repository tests
627
              format_supports_external_lookups=True),
3582.3.1 by Martin Pool
Split pack repository tests into their own file and use scenarios
628
         dict(format_name='development0',
629
              format_string="Bazaar development format 0 "
630
                  "(needs bzr.dev from before 1.3)\n",
631
              format_supports_external_lookups=False),
632
         dict(format_name='development0-subtree',
633
              format_string="Bazaar development format 0 "
634
                  "with subtree support (needs bzr.dev from before 1.3)\n",
635
              format_supports_external_lookups=False),
636
         dict(format_name='development',
637
              format_string="Bazaar development format 1 "
638
                  "(needs bzr.dev from before 1.6)\n",
639
              format_supports_external_lookups=True),
640
         dict(format_name='development-subtree',
641
              format_string="Bazaar development format 1 "
642
                  "with subtree support (needs bzr.dev from before 1.6)\n",
643
              format_supports_external_lookups=True),
644
         ]
645
    adapter = tests.TestScenarioApplier()
646
    # name of the scenario is the format name
647
    adapter.scenarios = [(s['format_name'], s) for s in scenarios_params]
648
    suite = tests.TestSuite()
649
    tests.adapt_tests(basic_tests, adapter, suite)
650
    return suite