/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

« back to all changes in this revision

Viewing changes to bzrlib/tests/per_pack_repository.py

  • Committer: Andrew Bennetts
  • Date: 2010-01-12 03:53:21 UTC
  • mfrom: (4948 +trunk)
  • mto: This revision was merged to the branch mainline in revision 4964.
  • Revision ID: andrew.bennetts@canonical.com-20100112035321-hofpz5p10224ryj3
Merge lp:bzr, resolving conflicts.

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
 
# Copyright (C) 2008 Canonical Ltd
 
1
# Copyright (C) 2008, 2009 Canonical Ltd
2
2
#
3
3
# This program is free software; you can redistribute it and/or modify
4
4
# it under the terms of the GNU General Public License as published by
12
12
#
13
13
# You should have received a copy of the GNU General Public License
14
14
# along with this program; if not, write to the Free Software
15
 
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
15
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16
16
 
17
17
"""Tests for pack repositories.
18
18
 
28
28
    bzrdir,
29
29
    errors,
30
30
    inventory,
 
31
    osutils,
31
32
    progress,
32
33
    repository,
33
34
    revision as _mod_revision,
37
38
    upgrade,
38
39
    workingtree,
39
40
    )
 
41
from bzrlib.repofmt import (
 
42
    pack_repo,
 
43
    groupcompress_repo,
 
44
    )
 
45
from bzrlib.repofmt.groupcompress_repo import RepositoryFormat2a
40
46
from bzrlib.smart import (
41
47
    client,
42
48
    server,
72
78
        """Packs do not need ordered data retrieval."""
73
79
        format = self.get_format()
74
80
        repo = self.make_repository('.', format=format)
75
 
        self.assertEqual('unordered', repo._fetch_order)
 
81
        self.assertEqual('unordered', repo._format._fetch_order)
76
82
 
77
83
    def test_attribute__fetch_uses_deltas(self):
78
84
        """Packs reuse deltas."""
79
85
        format = self.get_format()
80
86
        repo = self.make_repository('.', format=format)
81
 
        self.assertEqual(True, repo._fetch_uses_deltas)
 
87
        if isinstance(format.repository_format, RepositoryFormat2a):
 
88
            # TODO: This is currently a workaround. CHK format repositories
 
89
            #       ignore the 'deltas' flag, but during conversions, we can't
 
90
            #       do unordered delta fetches. Remove this clause once we
 
91
            #       improve the inter-format fetching.
 
92
            self.assertEqual(False, repo._format._fetch_uses_deltas)
 
93
        else:
 
94
            self.assertEqual(True, repo._format._fetch_uses_deltas)
82
95
 
83
96
    def test_disk_layout(self):
84
97
        format = self.get_format()
207
220
        tree = tree.bzrdir.open_workingtree()
208
221
        check_result = tree.branch.repository.check(
209
222
            [tree.branch.last_revision()])
210
 
        # We should have 50 (10x5) files in the obsolete_packs directory.
 
223
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
 
224
        if tree.branch.repository._format.supports_chks:
 
225
            nb_files += 1 # .cix
 
226
        # We should have 10 x nb_files files in the obsolete_packs directory.
211
227
        obsolete_files = list(trans.list_dir('obsolete_packs'))
212
228
        self.assertFalse('foo' in obsolete_files)
213
229
        self.assertFalse('bar' in obsolete_files)
214
 
        self.assertEqual(50, len(obsolete_files))
 
230
        self.assertEqual(10 * nb_files, len(obsolete_files))
215
231
        # XXX: Todo check packs obsoleted correctly - old packs and indices
216
232
        # in the obsolete_packs directory.
217
233
        large_pack_name = list(index.iter_all_entries())[0][1][0]
222
238
        pack_names = [node[1][0] for node in index.iter_all_entries()]
223
239
        self.assertTrue(large_pack_name in pack_names)
224
240
 
 
241
    def test_commit_write_group_returns_new_pack_names(self):
 
242
        # This test doesn't need real disk.
 
243
        self.vfs_transport_factory = tests.MemoryServer
 
244
        format = self.get_format()
 
245
        repo = self.make_repository('foo', format=format)
 
246
        repo.lock_write()
 
247
        try:
 
248
            # All current pack repository styles autopack at 10 revisions; and
 
249
            # autopack as well as regular commit write group needs to return
 
250
            # the new pack name. Looping is a little ugly, but we don't have a
 
251
            # clean way to test both the autopack logic and the normal code
 
252
            # path without doing this loop.
 
253
            for pos in range(10):
 
254
                revid = str(pos)
 
255
                repo.start_write_group()
 
256
                try:
 
257
                    inv = inventory.Inventory(revision_id=revid)
 
258
                    inv.root.revision = revid
 
259
                    repo.texts.add_lines((inv.root.file_id, revid), [], [])
 
260
                    rev = _mod_revision.Revision(timestamp=0, timezone=None,
 
261
                        committer="Foo Bar <foo@example.com>", message="Message",
 
262
                        revision_id=revid)
 
263
                    rev.parent_ids = ()
 
264
                    repo.add_revision(revid, rev, inv=inv)
 
265
                except:
 
266
                    repo.abort_write_group()
 
267
                    raise
 
268
                else:
 
269
                    old_names = repo._pack_collection._names.keys()
 
270
                    result = repo.commit_write_group()
 
271
                    cur_names = repo._pack_collection._names.keys()
 
272
                    new_names = list(set(cur_names) - set(old_names))
 
273
                    self.assertEqual(new_names, result)
 
274
        finally:
 
275
            repo.unlock()
 
276
 
225
277
    def test_fail_obsolete_deletion(self):
226
278
        # failing to delete obsolete packs is not fatal
227
279
        format = self.get_format()
228
280
        server = fakenfs.FakeNFSServer()
229
 
        server.setUp()
230
 
        self.addCleanup(server.tearDown)
 
281
        self.start_server(server)
231
282
        transport = get_transport(server.get_url())
232
283
        bzrdir = self.get_format().initialize_on_transport(transport)
233
284
        repo = bzrdir.create_repository()
250
301
        self.assertEqual(1, len(list(index.iter_all_entries())))
251
302
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))
252
303
 
 
304
    def test_pack_preserves_all_inventories(self):
 
305
        # This is related to bug:
 
306
        #   https://bugs.launchpad.net/bzr/+bug/412198
 
307
        # Stacked repositories need to keep the inventory for parents, even
 
308
        # after a pack operation. However, it is harder to test that, then just
 
309
        # test that all inventory texts are preserved.
 
310
        format = self.get_format()
 
311
        builder = self.make_branch_builder('source', format=format)
 
312
        builder.start_series()
 
313
        builder.build_snapshot('A-id', None, [
 
314
            ('add', ('', 'root-id', 'directory', None))])
 
315
        builder.build_snapshot('B-id', None, [
 
316
            ('add', ('file', 'file-id', 'file', 'B content\n'))])
 
317
        builder.build_snapshot('C-id', None, [
 
318
            ('modify', ('file-id', 'C content\n'))])
 
319
        builder.finish_series()
 
320
        b = builder.get_branch()
 
321
        b.lock_read()
 
322
        self.addCleanup(b.unlock)
 
323
        repo = self.make_repository('repo', shared=True, format=format)
 
324
        repo.lock_write()
 
325
        self.addCleanup(repo.unlock)
 
326
        repo.fetch(b.repository, revision_id='B-id')
 
327
        inv = b.repository.iter_inventories(['C-id']).next()
 
328
        repo.start_write_group()
 
329
        repo.add_inventory('C-id', inv, ['B-id'])
 
330
        repo.commit_write_group()
 
331
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
 
332
                         sorted(repo.inventories.keys()))
 
333
        repo.pack()
 
334
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
 
335
                         sorted(repo.inventories.keys()))
 
336
        # Content should be preserved as well
 
337
        self.assertEqual(inv, repo.iter_inventories(['C-id']).next())
 
338
 
253
339
    def test_pack_layout(self):
 
340
        # Test that the ordering of revisions in pack repositories is
 
341
        # tip->ancestor
254
342
        format = self.get_format()
255
343
        tree = self.make_branch_and_tree('.', format=format)
256
344
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
261
349
        self.addCleanup(tree.unlock)
262
350
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
263
351
            tree.branch.repository._pack_collection.names()[0])
264
 
        # revision access tends to be tip->ancestor, so ordering that way on 
 
352
        # revision access tends to be tip->ancestor, so ordering that way on
265
353
        # disk is a good idea.
266
354
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
 
355
            if type(format.repository_format) is RepositoryFormat2a:
 
356
                # group_start, group_len, internal_start, internal_len
 
357
                pos = map(int, val.split())
 
358
            else:
 
359
                # eol_flag, start, len
 
360
                pos = int(val[1:].split()[0])
267
361
            if key == ('1',):
268
 
                pos_1 = int(val[1:].split()[0])
 
362
                pos_1 = pos
269
363
            else:
270
 
                pos_2 = int(val[1:].split()[0])
271
 
        self.assertTrue(pos_2 < pos_1)
 
364
                pos_2 = pos
 
365
        self.assertTrue(pos_2 < pos_1, 'rev 1 came before rev 2 %s > %s'
 
366
                                       % (pos_1, pos_2))
272
367
 
273
368
    def test_pack_repositories_support_multiple_write_locks(self):
274
369
        format = self.get_format()
282
377
 
283
378
    def _add_text(self, repo, fileid):
284
379
        """Add a text to the repository within a write group."""
285
 
        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])
 
380
        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [],
 
381
            ['smaplerev+'+fileid])
286
382
 
287
383
    def test_concurrent_writers_merge_new_packs(self):
288
384
        format = self.get_format()
449
545
        finally:
450
546
            tree.unlock()
451
547
 
 
548
    def test_concurrent_pack_during_autopack(self):
 
549
        tree = self.make_branch_and_tree('tree')
 
550
        tree.lock_write()
 
551
        try:
 
552
            for i in xrange(9):
 
553
                tree.commit('rev %d' % (i,))
 
554
            r2 = repository.Repository.open('tree')
 
555
            r2.lock_write()
 
556
            try:
 
557
                # Monkey patch so that pack occurs while the other repo is
 
558
                # autopacking. This is slightly bad, but all current pack
 
559
                # repository implementations have a _pack_collection, and we
 
560
                # test that it gets triggered. So if a future format changes
 
561
                # things, the test will fail rather than succeed accidentally.
 
562
                autopack_count = [0]
 
563
                r1 = tree.branch.repository
 
564
                orig = r1._pack_collection.pack_distribution
 
565
                def trigger_during_auto(*args, **kwargs):
 
566
                    ret = orig(*args, **kwargs)
 
567
                    if not autopack_count[0]:
 
568
                        r2.pack()
 
569
                    autopack_count[0] += 1
 
570
                    return ret
 
571
                r1._pack_collection.pack_distribution = trigger_during_auto
 
572
                tree.commit('autopack-rev')
 
573
                # This triggers 2 autopacks. The first one causes r2.pack() to
 
574
                # fire, but r2 doesn't see the new pack file yet. The
 
575
                # autopack restarts and sees there are 2 files and there
 
576
                # should be only 1 for 10 commits. So it goes ahead and
 
577
                # finishes autopacking.
 
578
                self.assertEqual([2], autopack_count)
 
579
            finally:
 
580
                r2.unlock()
 
581
        finally:
 
582
            tree.unlock()
 
583
 
452
584
    def test_lock_write_does_not_physically_lock(self):
453
585
        repo = self.make_repository('.', format=self.get_format())
454
586
        repo.lock_write()
462
594
        def restoreFactory():
463
595
            ui.ui_factory = old_factory
464
596
        self.addCleanup(restoreFactory)
465
 
        ui.ui_factory = ui.SilentUIFactory()
466
 
        ui.ui_factory.stdin = StringIO("y\n")
 
597
        ui.ui_factory = ui.CannedInputUIFactory([True])
467
598
 
468
599
    def test_break_lock_breaks_physical_lock(self):
469
600
        repo = self.make_repository('.', format=self.get_format())
533
664
        self.assertRaises(errors.NoSuchRevision,
534
665
            missing_ghost.get_inventory, 'ghost')
535
666
 
 
667
    def make_write_ready_repo(self):
 
668
        format = self.get_format()
 
669
        if isinstance(format.repository_format, RepositoryFormat2a):
 
670
            raise TestNotApplicable("No missing compression parents")
 
671
        repo = self.make_repository('.', format=format)
 
672
        repo.lock_write()
 
673
        self.addCleanup(repo.unlock)
 
674
        repo.start_write_group()
 
675
        self.addCleanup(repo.abort_write_group)
 
676
        return repo
 
677
 
 
678
    def test_missing_inventories_compression_parent_prevents_commit(self):
 
679
        repo = self.make_write_ready_repo()
 
680
        key = ('junk',)
 
681
        repo.inventories._index._missing_compression_parents.add(key)
 
682
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
 
683
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
 
684
 
 
685
    def test_missing_revisions_compression_parent_prevents_commit(self):
 
686
        repo = self.make_write_ready_repo()
 
687
        key = ('junk',)
 
688
        repo.revisions._index._missing_compression_parents.add(key)
 
689
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
 
690
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
 
691
 
 
692
    def test_missing_signatures_compression_parent_prevents_commit(self):
 
693
        repo = self.make_write_ready_repo()
 
694
        key = ('junk',)
 
695
        repo.signatures._index._missing_compression_parents.add(key)
 
696
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
 
697
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
 
698
 
 
699
    def test_missing_text_compression_parent_prevents_commit(self):
 
700
        repo = self.make_write_ready_repo()
 
701
        key = ('some', 'junk')
 
702
        repo.texts._index._missing_compression_parents.add(key)
 
703
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
 
704
        e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
 
705
 
536
706
    def test_supports_external_lookups(self):
537
707
        repo = self.make_repository('.', format=self.get_format())
538
708
        self.assertEqual(self.format_supports_external_lookups,
544
714
        Also requires that the exception is logged.
545
715
        """
546
716
        self.vfs_transport_factory = memory.MemoryServer
547
 
        repo = self.make_repository('repo')
 
717
        repo = self.make_repository('repo', format=self.get_format())
548
718
        token = repo.lock_write()
549
719
        self.addCleanup(repo.unlock)
550
720
        repo.start_write_group()
553
723
        # abort_write_group will not raise an error
554
724
        self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
555
725
        # But it does log an error
556
 
        log_file = self._get_log(keep_log_file=True)
557
 
        self.assertContainsRe(log_file, 'abort_write_group failed')
558
 
        self.assertContainsRe(log_file, r'INFO  bzr: ERROR \(ignored\):')
 
726
        log = self.get_log()
 
727
        self.assertContainsRe(log, 'abort_write_group failed')
 
728
        self.assertContainsRe(log, r'INFO  bzr: ERROR \(ignored\):')
559
729
        if token is not None:
560
730
            repo.leave_lock_in_place()
561
 
        
 
731
 
562
732
    def test_abort_write_group_does_raise_when_not_suppressed(self):
563
733
        self.vfs_transport_factory = memory.MemoryServer
564
 
        repo = self.make_repository('repo')
 
734
        repo = self.make_repository('repo', format=self.get_format())
565
735
        token = repo.lock_write()
566
736
        self.addCleanup(repo.unlock)
567
737
        repo.start_write_group()
571
741
        self.assertRaises(Exception, repo.abort_write_group)
572
742
        if token is not None:
573
743
            repo.leave_lock_in_place()
574
 
        
 
744
 
 
745
    def test_suspend_write_group(self):
 
746
        self.vfs_transport_factory = memory.MemoryServer
 
747
        repo = self.make_repository('repo', format=self.get_format())
 
748
        token = repo.lock_write()
 
749
        self.addCleanup(repo.unlock)
 
750
        repo.start_write_group()
 
751
        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
 
752
        wg_tokens = repo.suspend_write_group()
 
753
        expected_pack_name = wg_tokens[0] + '.pack'
 
754
        expected_names = [wg_tokens[0] + ext for ext in
 
755
                            ('.rix', '.iix', '.tix', '.six')]
 
756
        if repo.chk_bytes is not None:
 
757
            expected_names.append(wg_tokens[0] + '.cix')
 
758
        expected_names.append(expected_pack_name)
 
759
        upload_transport = repo._pack_collection._upload_transport
 
760
        limbo_files = upload_transport.list_dir('')
 
761
        self.assertEqual(sorted(expected_names), sorted(limbo_files))
 
762
        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
 
763
        self.assertEqual(wg_tokens[0], md5.hexdigest())
 
764
 
 
765
    def test_resume_chk_bytes(self):
 
766
        self.vfs_transport_factory = memory.MemoryServer
 
767
        repo = self.make_repository('repo', format=self.get_format())
 
768
        if repo.chk_bytes is None:
 
769
            raise TestNotApplicable('no chk_bytes for this repository')
 
770
        token = repo.lock_write()
 
771
        self.addCleanup(repo.unlock)
 
772
        repo.start_write_group()
 
773
        text = 'a bit of text\n'
 
774
        key = ('sha1:' + osutils.sha_string(text),)
 
775
        repo.chk_bytes.add_lines(key, (), [text])
 
776
        wg_tokens = repo.suspend_write_group()
 
777
        same_repo = repo.bzrdir.open_repository()
 
778
        same_repo.lock_write()
 
779
        self.addCleanup(same_repo.unlock)
 
780
        same_repo.resume_write_group(wg_tokens)
 
781
        self.assertEqual([key], list(same_repo.chk_bytes.keys()))
 
782
        self.assertEqual(
 
783
            text, same_repo.chk_bytes.get_record_stream([key],
 
784
                'unordered', True).next().get_bytes_as('fulltext'))
 
785
        same_repo.abort_write_group()
 
786
        self.assertEqual([], list(same_repo.chk_bytes.keys()))
 
787
 
 
788
    def test_resume_write_group_then_abort(self):
 
789
        # Create a repo, start a write group, insert some data, suspend.
 
790
        self.vfs_transport_factory = memory.MemoryServer
 
791
        repo = self.make_repository('repo', format=self.get_format())
 
792
        token = repo.lock_write()
 
793
        self.addCleanup(repo.unlock)
 
794
        repo.start_write_group()
 
795
        text_key = ('file-id', 'revid')
 
796
        repo.texts.add_lines(text_key, (), ['lines'])
 
797
        wg_tokens = repo.suspend_write_group()
 
798
        # Get a fresh repository object for the repo on the filesystem.
 
799
        same_repo = repo.bzrdir.open_repository()
 
800
        # Resume
 
801
        same_repo.lock_write()
 
802
        self.addCleanup(same_repo.unlock)
 
803
        same_repo.resume_write_group(wg_tokens)
 
804
        same_repo.abort_write_group()
 
805
        self.assertEqual(
 
806
            [], same_repo._pack_collection._upload_transport.list_dir(''))
 
807
        self.assertEqual(
 
808
            [], same_repo._pack_collection._pack_transport.list_dir(''))
 
809
 
 
810
    def test_commit_resumed_write_group(self):
 
811
        self.vfs_transport_factory = memory.MemoryServer
 
812
        repo = self.make_repository('repo', format=self.get_format())
 
813
        token = repo.lock_write()
 
814
        self.addCleanup(repo.unlock)
 
815
        repo.start_write_group()
 
816
        text_key = ('file-id', 'revid')
 
817
        repo.texts.add_lines(text_key, (), ['lines'])
 
818
        wg_tokens = repo.suspend_write_group()
 
819
        # Get a fresh repository object for the repo on the filesystem.
 
820
        same_repo = repo.bzrdir.open_repository()
 
821
        # Resume
 
822
        same_repo.lock_write()
 
823
        self.addCleanup(same_repo.unlock)
 
824
        same_repo.resume_write_group(wg_tokens)
 
825
        same_repo.commit_write_group()
 
826
        expected_pack_name = wg_tokens[0] + '.pack'
 
827
        expected_names = [wg_tokens[0] + ext for ext in
 
828
                            ('.rix', '.iix', '.tix', '.six')]
 
829
        if repo.chk_bytes is not None:
 
830
            expected_names.append(wg_tokens[0] + '.cix')
 
831
        self.assertEqual(
 
832
            [], same_repo._pack_collection._upload_transport.list_dir(''))
 
833
        index_names = repo._pack_collection._index_transport.list_dir('')
 
834
        self.assertEqual(sorted(expected_names), sorted(index_names))
 
835
        pack_names = repo._pack_collection._pack_transport.list_dir('')
 
836
        self.assertEqual([expected_pack_name], pack_names)
 
837
 
 
838
    def test_resume_malformed_token(self):
 
839
        self.vfs_transport_factory = memory.MemoryServer
 
840
        # Make a repository with a suspended write group
 
841
        repo = self.make_repository('repo', format=self.get_format())
 
842
        token = repo.lock_write()
 
843
        self.addCleanup(repo.unlock)
 
844
        repo.start_write_group()
 
845
        text_key = ('file-id', 'revid')
 
846
        repo.texts.add_lines(text_key, (), ['lines'])
 
847
        wg_tokens = repo.suspend_write_group()
 
848
        # Make a new repository
 
849
        new_repo = self.make_repository('new_repo', format=self.get_format())
 
850
        token = new_repo.lock_write()
 
851
        self.addCleanup(new_repo.unlock)
 
852
        hacked_wg_token = (
 
853
            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
 
854
        self.assertRaises(
 
855
            errors.UnresumableWriteGroup,
 
856
            new_repo.resume_write_group, [hacked_wg_token])
 
857
 
575
858
 
576
859
class TestPackRepositoryStacking(TestCaseWithTransport):
577
860
 
579
862
 
580
863
    def setUp(self):
581
864
        if not self.format_supports_external_lookups:
582
 
            raise TestNotApplicable("%r doesn't support stacking" 
 
865
            raise TestNotApplicable("%r doesn't support stacking"
583
866
                % (self.format_name,))
584
867
        super(TestPackRepositoryStacking, self).setUp()
585
868
 
601
884
            if getattr(repo._format, 'supports_tree_reference', False):
602
885
                matching_format_name = 'pack-0.92-subtree'
603
886
            else:
604
 
                matching_format_name = 'rich-root-pack'
 
887
                if repo._format.supports_chks:
 
888
                    matching_format_name = '2a'
 
889
                else:
 
890
                    matching_format_name = 'rich-root-pack'
605
891
            mismatching_format_name = 'pack-0.92'
606
892
        else:
607
 
            matching_format_name = 'pack-0.92'
 
893
            # We don't have a non-rich-root CHK format.
 
894
            if repo._format.supports_chks:
 
895
                raise AssertionError("no non-rich-root CHK formats known")
 
896
            else:
 
897
                matching_format_name = 'pack-0.92'
608
898
            mismatching_format_name = 'pack-0.92-subtree'
609
899
        base = self.make_repository('base', format=matching_format_name)
610
900
        repo.add_fallback_repository(base)
615
905
            repo.add_fallback_repository, bad_repo)
616
906
        self.assertContainsRe(str(e),
617
907
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
618
 
            r'KnitPackRepository.*/repo/.*\n'
 
908
            r'.*Repository.*/repo/.*\n'
619
909
            r'different rich-root support')
620
910
 
621
911
    def test_stack_checks_serializers_compatibility(self):
627
917
            mismatching_format_name = 'rich-root-pack'
628
918
        else:
629
919
            if repo.supports_rich_root():
630
 
                matching_format_name = 'rich-root-pack'
 
920
                if repo._format.supports_chks:
 
921
                    matching_format_name = '2a'
 
922
                else:
 
923
                    matching_format_name = 'rich-root-pack'
631
924
                mismatching_format_name = 'pack-0.92-subtree'
632
925
            else:
633
926
                raise TestNotApplicable('No formats use non-v5 serializer'
641
934
            repo.add_fallback_repository, bad_repo)
642
935
        self.assertContainsRe(str(e),
643
936
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
644
 
            r'KnitPackRepository.*/repo/.*\n'
 
937
            r'.*Repository.*/repo/.*\n'
645
938
            r'different serializers')
646
939
 
647
940
    def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
649
942
        base.commit('foo')
650
943
        referencing = self.make_branch_and_tree('repo', format=self.get_format())
651
944
        referencing.branch.repository.add_fallback_repository(base.branch.repository)
652
 
        referencing.commit('bar')
 
945
        local_tree = referencing.branch.create_checkout('local')
 
946
        local_tree.commit('bar')
653
947
        new_instance = referencing.bzrdir.open_repository()
654
948
        new_instance.lock_read()
655
949
        self.addCleanup(new_instance.unlock)
657
951
        self.assertEqual(1, len(new_instance._pack_collection.all_packs()))
658
952
 
659
953
    def test_autopack_only_considers_main_repo_packs(self):
660
 
        base = self.make_branch_and_tree('base', format=self.get_format())
 
954
        format = self.get_format()
 
955
        base = self.make_branch_and_tree('base', format=format)
661
956
        base.commit('foo')
662
 
        tree = self.make_branch_and_tree('repo', format=self.get_format())
 
957
        tree = self.make_branch_and_tree('repo', format=format)
663
958
        tree.branch.repository.add_fallback_repository(base.branch.repository)
664
959
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
665
960
        # This test could be a little cheaper by replacing the packs
667
962
        # and max packs policy - so we are checking the policy is honoured
668
963
        # in the test. But for now 11 commits is not a big deal in a single
669
964
        # test.
 
965
        local_tree = tree.branch.create_checkout('local')
670
966
        for x in range(9):
671
 
            tree.commit('commit %s' % x)
 
967
            local_tree.commit('commit %s' % x)
672
968
        # there should be 9 packs:
673
969
        index = self.index_class(trans, 'pack-names', None)
674
970
        self.assertEqual(9, len(list(index.iter_all_entries())))
675
971
        # committing one more should coalesce to 1 of 10.
676
 
        tree.commit('commit triggering pack')
 
972
        local_tree.commit('commit triggering pack')
677
973
        index = self.index_class(trans, 'pack-names', None)
678
974
        self.assertEqual(1, len(list(index.iter_all_entries())))
679
975
        # packing should not damage data
680
976
        tree = tree.bzrdir.open_workingtree()
681
977
        check_result = tree.branch.repository.check(
682
978
            [tree.branch.last_revision()])
683
 
        # We should have 50 (10x5) files in the obsolete_packs directory.
 
979
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
 
980
        if tree.branch.repository._format.supports_chks:
 
981
            nb_files += 1 # .cix
 
982
        # We should have 10 x nb_files files in the obsolete_packs directory.
684
983
        obsolete_files = list(trans.list_dir('obsolete_packs'))
685
984
        self.assertFalse('foo' in obsolete_files)
686
985
        self.assertFalse('bar' in obsolete_files)
687
 
        self.assertEqual(50, len(obsolete_files))
 
986
        self.assertEqual(10 * nb_files, len(obsolete_files))
688
987
        # XXX: Todo check packs obsoleted correctly - old packs and indices
689
988
        # in the obsolete_packs directory.
690
989
        large_pack_name = list(index.iter_all_entries())[0][1][0]
691
990
        # finally, committing again should not touch the large pack.
692
 
        tree.commit('commit not triggering pack')
 
991
        local_tree.commit('commit not triggering pack')
693
992
        index = self.index_class(trans, 'pack-names', None)
694
993
        self.assertEqual(2, len(list(index.iter_all_entries())))
695
994
        pack_names = [node[1][0] for node in index.iter_all_entries()]
696
995
        self.assertTrue(large_pack_name in pack_names)
697
996
 
698
997
 
 
998
class TestKeyDependencies(TestCaseWithTransport):
    """Check that a pack repo's revision-index key dependencies are
    cleared whenever a write group ends, however it ends (abort,
    suspend, or commit).

    The scenario always inserts revision B-id, which names 'ghost-id'
    as a parent that is absent from the target, so the dependency
    tracker has something to record.
    """

    def get_format(self):
        # format_name is injected per-scenario by load_tests.
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def create_source_and_target(self):
        """Build a source branch with a ghost parent and an empty target.

        Returns a (source_repository, target_repository) pair; the source
        is read-locked and the target write-locked, with unlocks scheduled
        via addCleanup.
        """
        builder = self.make_branch_builder('source', format=self.get_format())
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        # B-id claims 'ghost-id' as a parent; that revision never exists.
        builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
        builder.finish_series()
        repo = self.make_repository('target', format=self.get_format())
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        return b.repository, repo

    def test_key_dependencies_cleared_on_abort(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                                                             'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            # While the group is open, B-id is tracked as a referrer.
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.abort_write_group()
        # Aborting the group must drop the tracked dependencies.
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_suspend(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                                                             'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.suspend_write_group()
        # Suspending the group must drop the tracked dependencies too.
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_commit(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            # Copy all texts, inventories, and chks so that nothing is missing
            # for revision B-id.
            for vf_name in ['texts', 'chk_bytes', 'inventories']:
                # Not every format has chk_bytes, hence the default of None.
                source_vf = getattr(source_repo, vf_name, None)
                if source_vf is None:
                    continue
                target_vf = getattr(target_repo, vf_name)
                stream = source_vf.get_record_stream(
                    source_vf.keys(), 'unordered', True)
                target_vf.insert_record_stream(stream)
            # Copy just revision B-id
            stream = source_repo.revisions.get_record_stream(
                [('B-id',)], 'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.commit_write_group()
        # A successful commit also clears the dependency tracker.
        self.assertEqual([], sorted(key_refs.get_referrers()))
1068
 
699
1069
class TestSmartServerAutopack(TestCaseWithTransport):
700
1070
 
701
1071
    def setUp(self):
703
1073
        # Create a smart server that publishes whatever the backing VFS server
704
1074
        # does.
705
1075
        self.smart_server = server.SmartTCPServer_for_testing()
706
 
        self.smart_server.setUp(self.get_server())
707
 
        self.addCleanup(self.smart_server.tearDown)
 
1076
        self.start_server(self.smart_server, self.get_server())
708
1077
        # Log all HPSS calls into self.hpss_calls.
709
1078
        client._SmartClient.hooks.install_named_hook(
710
1079
            'call', self.capture_hpss_call, None)
716
1085
    def get_format(self):
717
1086
        return bzrdir.format_registry.make_bzrdir(self.format_name)
718
1087
 
719
 
    def test_autopack_rpc_is_used_when_using_hpss(self):
 
1088
    def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
720
1089
        # Make local and remote repos
721
 
        tree = self.make_branch_and_tree('local', format=self.get_format())
722
 
        self.make_branch_and_tree('remote', format=self.get_format())
 
1090
        format = self.get_format()
 
1091
        tree = self.make_branch_and_tree('local', format=format)
 
1092
        self.make_branch_and_tree('remote', format=format)
723
1093
        remote_branch_url = self.smart_server.get_url() + 'remote'
724
1094
        remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
725
1095
        # Make 9 local revisions, and push them one at a time to the remote
731
1101
        self.hpss_calls = []
732
1102
        tree.commit('commit triggering pack')
733
1103
        tree.branch.push(remote_branch)
734
 
        self.assertTrue('PackRepository.autopack' in self.hpss_calls)
735
 
 
736
 
 
737
 
def load_tests(basic_tests, module, test_loader):
 
1104
        autopack_calls = len([call for call in self.hpss_calls if call ==
 
1105
            'PackRepository.autopack'])
 
1106
        streaming_calls = len([call for call in self.hpss_calls if call in
 
1107
            ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
 
1108
        if autopack_calls:
 
1109
            # Non streaming server
 
1110
            self.assertEqual(1, autopack_calls)
 
1111
            self.assertEqual(0, streaming_calls)
 
1112
        else:
 
1113
            # Streaming was used, which autopacks on the remote end.
 
1114
            self.assertEqual(0, autopack_calls)
 
1115
            # NB: The 2 calls are because of the sanity check that the server
 
1116
            # supports the verb (see remote.py:RemoteSink.insert_stream for
 
1117
            # details).
 
1118
            self.assertEqual(2, streaming_calls)
 
1119
 
 
1120
 
 
1121
def load_tests(basic_tests, module, loader):
738
1122
    # these give the bzrdir canned format name, and the repository on-disk
739
1123
    # format string
740
1124
    scenarios_params = [
765
1149
                  "(bzr 1.9)\n",
766
1150
              format_supports_external_lookups=True,
767
1151
              index_class=BTreeGraphIndex),
768
 
         dict(format_name='development2',
769
 
              format_string="Bazaar development format 2 "
770
 
                  "(needs bzr.dev from before 1.8)\n",
771
 
              format_supports_external_lookups=True,
772
 
              index_class=BTreeGraphIndex),
773
 
         dict(format_name='development2-subtree',
774
 
              format_string="Bazaar development format 2 "
775
 
                  "with subtree support (needs bzr.dev from before 1.8)\n",
 
1152
         dict(format_name='2a',
 
1153
              format_string="Bazaar repository format 2a "
 
1154
                "(needs bzr 1.16 or later)\n",
776
1155
              format_supports_external_lookups=True,
777
1156
              index_class=BTreeGraphIndex),
778
1157
         ]
779
 
    adapter = tests.TestScenarioApplier()
780
1158
    # name of the scenario is the format name
781
 
    adapter.scenarios = [(s['format_name'], s) for s in scenarios_params]
782
 
    suite = tests.TestSuite()
783
 
    tests.adapt_tests(basic_tests, adapter, suite)
784
 
    return suite
 
1159
    scenarios = [(s['format_name'], s) for s in scenarios_params]
 
1160
    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())