# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17
"""Tests for pack repositories.
19
These tests are repeated for all pack-based repository formats.
22
from cStringIO import StringIO
23
from stat import S_ISDIR
25
from bzrlib.index import GraphIndex, InMemoryGraphIndex
32
revision as _mod_revision,
39
from bzrlib.tests import (
41
TestCaseWithTransport,
45
from bzrlib.transport import (
51
class TestPackRepository(TestCaseWithTransport):
52
"""Tests to be repeated across all pack-based formats.
54
The following are populated from the test scenario:
56
:ivar format_name: Registered name fo the format to test.
57
:ivar format_string: On-disk format marker.
58
:ivar format_supports_external_lookups: Boolean.
62
return bzrdir.format_registry.make_bzrdir(self.format_name)
64
def test_attribute__fetch_order(self):
65
"""Packs do not need ordered data retrieval."""
66
format = self.get_format()
67
repo = self.make_repository('.', format=format)
68
self.assertEqual('unordered', repo._fetch_order)
70
def test_attribute__fetch_uses_deltas(self):
71
"""Packs reuse deltas."""
72
format = self.get_format()
73
repo = self.make_repository('.', format=format)
74
self.assertEqual(True, repo._fetch_uses_deltas)
76
def test_disk_layout(self):
77
format = self.get_format()
78
repo = self.make_repository('.', format=format)
79
# in case of side effects of locking.
82
t = repo.bzrdir.get_repository_transport(None)
84
# XXX: no locks left when unlocked at the moment
85
# self.assertEqualDiff('', t.get('lock').read())
86
self.check_databases(t)
88
def check_format(self, t):
90
self.format_string, # from scenario
91
t.get('format').read())
93
def assertHasNoKndx(self, t, knit_name):
94
"""Assert that knit_name has no index on t."""
95
self.assertFalse(t.has(knit_name + '.kndx'))
97
def assertHasNoKnit(self, t, knit_name):
98
"""Assert that knit_name exists on t."""
100
self.assertFalse(t.has(knit_name + '.knit'))
102
def check_databases(self, t):
103
"""check knit content for a repository."""
104
# check conversion worked
105
self.assertHasNoKndx(t, 'inventory')
106
self.assertHasNoKnit(t, 'inventory')
107
self.assertHasNoKndx(t, 'revisions')
108
self.assertHasNoKnit(t, 'revisions')
109
self.assertHasNoKndx(t, 'signatures')
110
self.assertHasNoKnit(t, 'signatures')
111
self.assertFalse(t.has('knits'))
112
# revision-indexes file-container directory
114
list(GraphIndex(t, 'pack-names', None).iter_all_entries()))
115
self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
116
self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
117
self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
118
self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))
120
def test_shared_disk_layout(self):
121
format = self.get_format()
122
repo = self.make_repository('.', shared=True, format=format)
124
t = repo.bzrdir.get_repository_transport(None)
126
# XXX: no locks left when unlocked at the moment
127
# self.assertEqualDiff('', t.get('lock').read())
128
# We should have a 'shared-storage' marker file.
129
self.assertEqualDiff('', t.get('shared-storage').read())
130
self.check_databases(t)
132
def test_shared_no_tree_disk_layout(self):
133
format = self.get_format()
134
repo = self.make_repository('.', shared=True, format=format)
135
repo.set_make_working_trees(False)
137
t = repo.bzrdir.get_repository_transport(None)
139
# XXX: no locks left when unlocked at the moment
140
# self.assertEqualDiff('', t.get('lock').read())
141
# We should have a 'shared-storage' marker file.
142
self.assertEqualDiff('', t.get('shared-storage').read())
143
# We should have a marker for the no-working-trees flag.
144
self.assertEqualDiff('', t.get('no-working-trees').read())
145
# The marker should go when we toggle the setting.
146
repo.set_make_working_trees(True)
147
self.assertFalse(t.has('no-working-trees'))
148
self.check_databases(t)
150
def test_adding_revision_creates_pack_indices(self):
151
format = self.get_format()
152
tree = self.make_branch_and_tree('.', format=format)
153
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
155
list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
156
tree.commit('foobarbaz')
157
index = GraphIndex(trans, 'pack-names', None)
158
index_nodes = list(index.iter_all_entries())
159
self.assertEqual(1, len(index_nodes))
160
node = index_nodes[0]
162
# the pack sizes should be listed in the index
164
sizes = [int(digits) for digits in pack_value.split(' ')]
165
for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
166
stat = trans.stat('indices/%s%s' % (name, suffix))
167
self.assertEqual(size, stat.st_size)
169
def test_pulling_nothing_leads_to_no_new_names(self):
170
format = self.get_format()
171
tree1 = self.make_branch_and_tree('1', format=format)
172
tree2 = self.make_branch_and_tree('2', format=format)
173
tree1.branch.repository.fetch(tree2.branch.repository)
174
trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
176
list(GraphIndex(trans, 'pack-names', None).iter_all_entries()))
178
def test_commit_across_pack_shape_boundary_autopacks(self):
179
format = self.get_format()
180
tree = self.make_branch_and_tree('.', format=format)
181
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
182
# This test could be a little cheaper by replacing the packs
183
# attribute on the repository to allow a different pack distribution
184
# and max packs policy - so we are checking the policy is honoured
185
# in the test. But for now 11 commits is not a big deal in a single
188
tree.commit('commit %s' % x)
189
# there should be 9 packs:
190
index = GraphIndex(trans, 'pack-names', None)
191
self.assertEqual(9, len(list(index.iter_all_entries())))
192
# insert some files in obsolete_packs which should be removed by pack.
193
trans.put_bytes('obsolete_packs/foo', '123')
194
trans.put_bytes('obsolete_packs/bar', '321')
195
# committing one more should coalesce to 1 of 10.
196
tree.commit('commit triggering pack')
197
index = GraphIndex(trans, 'pack-names', None)
198
self.assertEqual(1, len(list(index.iter_all_entries())))
199
# packing should not damage data
200
tree = tree.bzrdir.open_workingtree()
201
check_result = tree.branch.repository.check(
202
[tree.branch.last_revision()])
203
# We should have 50 (10x5) files in the obsolete_packs directory.
204
obsolete_files = list(trans.list_dir('obsolete_packs'))
205
self.assertFalse('foo' in obsolete_files)
206
self.assertFalse('bar' in obsolete_files)
207
self.assertEqual(50, len(obsolete_files))
208
# XXX: Todo check packs obsoleted correctly - old packs and indices
209
# in the obsolete_packs directory.
210
large_pack_name = list(index.iter_all_entries())[0][1][0]
211
# finally, committing again should not touch the large pack.
212
tree.commit('commit not triggering pack')
213
index = GraphIndex(trans, 'pack-names', None)
214
self.assertEqual(2, len(list(index.iter_all_entries())))
215
pack_names = [node[1][0] for node in index.iter_all_entries()]
216
self.assertTrue(large_pack_name in pack_names)
218
def test_fail_obsolete_deletion(self):
219
# failing to delete obsolete packs is not fatal
220
format = self.get_format()
221
server = fakenfs.FakeNFSServer()
223
self.addCleanup(server.tearDown)
224
transport = get_transport(server.get_url())
225
bzrdir = self.get_format().initialize_on_transport(transport)
226
repo = bzrdir.create_repository()
227
repo_transport = bzrdir.get_repository_transport(None)
228
self.assertTrue(repo_transport.has('obsolete_packs'))
229
# these files are in use by another client and typically can't be deleted
230
repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
231
repo._pack_collection._clear_obsolete_packs()
232
self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))
234
def test_pack_after_two_commits_packs_everything(self):
235
format = self.get_format()
236
tree = self.make_branch_and_tree('.', format=format)
237
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
239
tree.commit('more work')
240
tree.branch.repository.pack()
241
# there should be 1 pack:
242
index = GraphIndex(trans, 'pack-names', None)
243
self.assertEqual(1, len(list(index.iter_all_entries())))
244
self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))
246
def test_pack_layout(self):
247
format = self.get_format()
248
tree = self.make_branch_and_tree('.', format=format)
249
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
250
tree.commit('start', rev_id='1')
251
tree.commit('more work', rev_id='2')
252
tree.branch.repository.pack()
254
self.addCleanup(tree.unlock)
255
pack = tree.branch.repository._pack_collection.get_pack_by_name(
256
tree.branch.repository._pack_collection.names()[0])
257
# revision access tends to be tip->ancestor, so ordering that way on
258
# disk is a good idea.
259
for _1, key, val, refs in pack.revision_index.iter_all_entries():
261
pos_1 = int(val[1:].split()[0])
263
pos_2 = int(val[1:].split()[0])
264
self.assertTrue(pos_2 < pos_1)
266
def test_pack_repositories_support_multiple_write_locks(self):
267
format = self.get_format()
268
self.make_repository('.', shared=True, format=format)
269
r1 = repository.Repository.open('.')
270
r2 = repository.Repository.open('.')
272
self.addCleanup(r1.unlock)
276
def _add_text(self, repo, fileid):
277
"""Add a text to the repository within a write group."""
278
repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], [])
280
def test_concurrent_writers_merge_new_packs(self):
281
format = self.get_format()
282
self.make_repository('.', shared=True, format=format)
283
r1 = repository.Repository.open('.')
284
r2 = repository.Repository.open('.')
287
# access enough data to load the names list
288
list(r1.all_revision_ids())
291
# access enough data to load the names list
292
list(r2.all_revision_ids())
293
r1.start_write_group()
295
r2.start_write_group()
297
self._add_text(r1, 'fileidr1')
298
self._add_text(r2, 'fileidr2')
300
r2.abort_write_group()
303
r1.abort_write_group()
305
# both r1 and r2 have open write groups with data in them
306
# created while the other's write group was open.
307
# Commit both which requires a merge to the pack-names.
309
r1.commit_write_group()
311
r1.abort_write_group()
312
r2.abort_write_group()
314
r2.commit_write_group()
315
# tell r1 to reload from disk
316
r1._pack_collection.reset()
317
# Now both repositories should know about both names
318
r1._pack_collection.ensure_loaded()
319
r2._pack_collection.ensure_loaded()
320
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
321
self.assertEqual(2, len(r1._pack_collection.names()))
327
def test_concurrent_writer_second_preserves_dropping_a_pack(self):
328
format = self.get_format()
329
self.make_repository('.', shared=True, format=format)
330
r1 = repository.Repository.open('.')
331
r2 = repository.Repository.open('.')
335
r1.start_write_group()
337
self._add_text(r1, 'fileidr1')
339
r1.abort_write_group()
342
r1.commit_write_group()
343
r1._pack_collection.ensure_loaded()
344
name_to_drop = r1._pack_collection.all_packs()[0].name
349
# access enough data to load the names list
350
list(r1.all_revision_ids())
353
# access enough data to load the names list
354
list(r2.all_revision_ids())
355
r1._pack_collection.ensure_loaded()
357
r2.start_write_group()
359
# in r1, drop the pack
360
r1._pack_collection._remove_pack_from_memory(
361
r1._pack_collection.get_pack_by_name(name_to_drop))
363
self._add_text(r2, 'fileidr2')
365
r2.abort_write_group()
368
r1._pack_collection.reset()
370
# r1 has a changed names list, and r2 an open write groups with
372
# save r1, and then commit the r2 write group, which requires a
373
# merge to the pack-names, which should not reinstate
376
r1._pack_collection._save_pack_names()
377
r1._pack_collection.reset()
379
r2.abort_write_group()
382
r2.commit_write_group()
384
r2.abort_write_group()
386
# Now both repositories should now about just one name.
387
r1._pack_collection.ensure_loaded()
388
r2._pack_collection.ensure_loaded()
389
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names())
390
self.assertEqual(1, len(r1._pack_collection.names()))
391
self.assertFalse(name_to_drop in r1._pack_collection.names())
397
def test_lock_write_does_not_physically_lock(self):
398
repo = self.make_repository('.', format=self.get_format())
400
self.addCleanup(repo.unlock)
401
self.assertFalse(repo.get_physical_lock_status())
403
def prepare_for_break_lock(self):
404
# Setup the global ui factory state so that a break-lock method call
405
# will find usable input in the input stream.
406
old_factory = ui.ui_factory
407
def restoreFactory():
408
ui.ui_factory = old_factory
409
self.addCleanup(restoreFactory)
410
ui.ui_factory = ui.SilentUIFactory()
411
ui.ui_factory.stdin = StringIO("y\n")
413
def test_break_lock_breaks_physical_lock(self):
414
repo = self.make_repository('.', format=self.get_format())
415
repo._pack_collection.lock_names()
416
repo.control_files.leave_in_place()
418
repo2 = repository.Repository.open('.')
419
self.assertTrue(repo.get_physical_lock_status())
420
self.prepare_for_break_lock()
422
self.assertFalse(repo.get_physical_lock_status())
424
def test_broken_physical_locks_error_on__unlock_names_lock(self):
425
repo = self.make_repository('.', format=self.get_format())
426
repo._pack_collection.lock_names()
427
self.assertTrue(repo.get_physical_lock_status())
428
repo2 = repository.Repository.open('.')
429
self.prepare_for_break_lock()
431
self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)
433
def test_fetch_without_find_ghosts_ignores_ghosts(self):
434
# we want two repositories at this point:
435
# one with a revision that is a ghost in the other
437
# 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
438
# 'references' is present in both repositories, and 'tip' is present
440
# has_ghost missing_ghost
441
#------------------------------
443
# 'references' 'references'
445
# In this test we fetch 'tip' which should not fetch 'ghost'
446
has_ghost = self.make_repository('has_ghost', format=self.get_format())
447
missing_ghost = self.make_repository('missing_ghost',
448
format=self.get_format())
450
def add_commit(repo, revision_id, parent_ids):
452
repo.start_write_group()
453
inv = inventory.Inventory(revision_id=revision_id)
454
inv.root.revision = revision_id
455
root_id = inv.root.file_id
456
sha1 = repo.add_inventory(revision_id, inv, [])
457
repo.texts.add_lines((root_id, revision_id), [], [])
458
rev = _mod_revision.Revision(timestamp=0,
460
committer="Foo Bar <foo@example.com>",
463
revision_id=revision_id)
464
rev.parent_ids = parent_ids
465
repo.add_revision(revision_id, rev)
466
repo.commit_write_group()
468
add_commit(has_ghost, 'ghost', [])
469
add_commit(has_ghost, 'references', ['ghost'])
470
add_commit(missing_ghost, 'references', ['ghost'])
471
add_commit(has_ghost, 'tip', ['references'])
472
missing_ghost.fetch(has_ghost, 'tip')
473
# missing ghost now has tip and not ghost.
474
rev = missing_ghost.get_revision('tip')
475
inv = missing_ghost.get_inventory('tip')
476
self.assertRaises(errors.NoSuchRevision,
477
missing_ghost.get_revision, 'ghost')
478
self.assertRaises(errors.NoSuchRevision,
479
missing_ghost.get_inventory, 'ghost')
481
def test_supports_external_lookups(self):
482
repo = self.make_repository('.', format=self.get_format())
483
self.assertEqual(self.format_supports_external_lookups,
484
repo._format.supports_external_lookups)
487
class TestPackRepositoryStacking(TestCaseWithTransport):
489
"""Tests for stacking pack repositories"""
492
if not self.format_supports_external_lookups:
493
raise TestNotApplicable("%r doesn't support stacking"
494
% (self.format_name,))
495
super(TestPackRepositoryStacking, self).setUp()
497
def get_format(self):
498
return bzrdir.format_registry.make_bzrdir(self.format_name)
500
def test_stack_checks_rich_root_compatibility(self):
501
# early versions of the packing code relied on pack internals to
502
# stack, but the current version should be able to stack on any
505
# TODO: Possibly this should be run per-repository-format and raise
506
# TestNotApplicable on formats that don't support stacking. -- mbp
508
repo = self.make_repository('repo', format=self.get_format())
509
if repo.supports_rich_root():
510
# can only stack on repositories that have compatible internal
512
if getattr(repo._format, 'supports_tree_reference', False):
513
matching_format_name = 'pack-0.92-subtree'
515
matching_format_name = 'rich-root-pack'
516
mismatching_format_name = 'pack-0.92'
518
matching_format_name = 'pack-0.92'
519
mismatching_format_name = 'pack-0.92-subtree'
520
base = self.make_repository('base', format=matching_format_name)
521
repo.add_fallback_repository(base)
522
# you can't stack on something with incompatible data
523
bad_repo = self.make_repository('mismatch',
524
format=mismatching_format_name)
525
e = self.assertRaises(errors.IncompatibleRepositories,
526
repo.add_fallback_repository, bad_repo)
527
self.assertContainsRe(str(e),
528
r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
529
r'KnitPackRepository.*/repo/.*\n'
530
r'different rich-root support')
532
def test_stack_checks_serializers_compatibility(self):
533
repo = self.make_repository('repo', format=self.get_format())
534
if getattr(repo._format, 'supports_tree_reference', False):
535
# can only stack on repositories that have compatible internal
537
matching_format_name = 'pack-0.92-subtree'
538
mismatching_format_name = 'rich-root-pack'
540
if repo.supports_rich_root():
541
matching_format_name = 'rich-root-pack'
542
mismatching_format_name = 'pack-0.92-subtree'
544
raise TestNotApplicable('No formats use non-v5 serializer'
545
' without having rich-root also set')
546
base = self.make_repository('base', format=matching_format_name)
547
repo.add_fallback_repository(base)
548
# you can't stack on something with incompatible data
549
bad_repo = self.make_repository('mismatch',
550
format=mismatching_format_name)
551
e = self.assertRaises(errors.IncompatibleRepositories,
552
repo.add_fallback_repository, bad_repo)
553
self.assertContainsRe(str(e),
554
r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
555
r'KnitPackRepository.*/repo/.*\n'
556
r'different serializers')
558
def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
559
base = self.make_branch_and_tree('base', format=self.get_format())
561
referencing = self.make_branch_and_tree('repo', format=self.get_format())
562
referencing.branch.repository.add_fallback_repository(base.branch.repository)
563
referencing.commit('bar')
564
new_instance = referencing.bzrdir.open_repository()
565
new_instance.lock_read()
566
self.addCleanup(new_instance.unlock)
567
new_instance._pack_collection.ensure_loaded()
568
self.assertEqual(1, len(new_instance._pack_collection.all_packs()))
570
def test_autopack_only_considers_main_repo_packs(self):
571
base = self.make_branch_and_tree('base', format=self.get_format())
573
tree = self.make_branch_and_tree('repo', format=self.get_format())
574
tree.branch.repository.add_fallback_repository(base.branch.repository)
575
trans = tree.branch.repository.bzrdir.get_repository_transport(None)
576
# This test could be a little cheaper by replacing the packs
577
# attribute on the repository to allow a different pack distribution
578
# and max packs policy - so we are checking the policy is honoured
579
# in the test. But for now 11 commits is not a big deal in a single
582
tree.commit('commit %s' % x)
583
# there should be 9 packs:
584
index = GraphIndex(trans, 'pack-names', None)
585
self.assertEqual(9, len(list(index.iter_all_entries())))
586
# committing one more should coalesce to 1 of 10.
587
tree.commit('commit triggering pack')
588
index = GraphIndex(trans, 'pack-names', None)
589
self.assertEqual(1, len(list(index.iter_all_entries())))
590
# packing should not damage data
591
tree = tree.bzrdir.open_workingtree()
592
check_result = tree.branch.repository.check(
593
[tree.branch.last_revision()])
594
# We should have 50 (10x5) files in the obsolete_packs directory.
595
obsolete_files = list(trans.list_dir('obsolete_packs'))
596
self.assertFalse('foo' in obsolete_files)
597
self.assertFalse('bar' in obsolete_files)
598
self.assertEqual(50, len(obsolete_files))
599
# XXX: Todo check packs obsoleted correctly - old packs and indices
600
# in the obsolete_packs directory.
601
large_pack_name = list(index.iter_all_entries())[0][1][0]
602
# finally, committing again should not touch the large pack.
603
tree.commit('commit not triggering pack')
604
index = GraphIndex(trans, 'pack-names', None)
605
self.assertEqual(2, len(list(index.iter_all_entries())))
606
pack_names = [node[1][0] for node in index.iter_all_entries()]
607
self.assertTrue(large_pack_name in pack_names)
610
def load_tests(basic_tests, module, test_loader):
611
# these give the bzrdir canned format name, and the repository on-disk
614
dict(format_name='pack-0.92',
615
format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
616
format_supports_external_lookups=False),
617
dict(format_name='pack-0.92-subtree',
618
format_string="Bazaar pack repository format 1 "
619
"with subtree support (needs bzr 0.92)\n",
620
format_supports_external_lookups=False),
621
dict(format_name='1.6',
622
format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
623
format_supports_external_lookups=True),
624
dict(format_name='1.6.1-rich-root',
625
format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
627
format_supports_external_lookups=True),
628
dict(format_name='development',
629
format_string="Bazaar development format 1 "
630
"(needs bzr.dev from before 1.6)\n",
631
format_supports_external_lookups=True),
632
dict(format_name='development-subtree',
633
format_string="Bazaar development format 1 "
634
"with subtree support (needs bzr.dev from before 1.6)\n",
635
format_supports_external_lookups=True),
637
adapter = tests.TestScenarioApplier()
638
# name of the scenario is the format name
639
adapter.scenarios = [(s['format_name'], s) for s in scenarios_params]
640
suite = tests.TestSuite()
641
tests.adapt_tests(basic_tests, adapter, suite)