# Copyright (C) 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Tests for pack repositories.

These tests are repeated for all pack-based repository formats.
"""

from cStringIO import StringIO
from stat import S_ISDIR

from bzrlib.btree_index import BTreeGraphIndex
from bzrlib.index import GraphIndex
from bzrlib import (
    bzrdir,
    errors,
    inventory,
    osutils,
    repository,
    revision as _mod_revision,
    tests,
    ui,
    )
from bzrlib.repofmt.groupcompress_repo import RepositoryFormatCHK1
from bzrlib.smart import (
    client,
    server,
    )
from bzrlib.tests import (
    TestCaseWithTransport,
    TestNotApplicable,
    TestSkipped,
    )
from bzrlib.transport import (
    fakenfs,
    get_transport,
    memory,
    )
from bzrlib.tests.per_repository import TestCaseWithRepository


class TestPackRepository(TestCaseWithTransport):
    """Tests to be repeated across all pack-based formats.

    The following are populated from the test scenario:

    :ivar format_name: Registered name of the format to test.
    :ivar format_string: On-disk format marker.
    :ivar format_supports_external_lookups: Boolean.
    """

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)
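
    # Each scenario (built by load_tests at the bottom of this module)
    # supplies the attributes above via a dict along the lines of
    #     dict(format_name='1.9',
    #          format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
    #          format_supports_external_lookups=True,
    #          index_class=BTreeGraphIndex)
    # so every test in this class runs once per pack-based format.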

    def test_attribute__fetch_order(self):
        """Packs do not need ordered data retrieval."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual('unordered', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Packs reuse deltas."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        if isinstance(format.repository_format, RepositoryFormatCHK1):
            # TODO: This is currently a workaround. CHK format repositories
            # ignore the 'deltas' flag, but during conversions, we can't
            # do unordered delta fetches. Remove this clause once we
            # improve the inter-format fetching.
            self.assertEqual(False, repo._format._fetch_uses_deltas)
        else:
            self.assertEqual(True, repo._format._fetch_uses_deltas)

    def test_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        # in case of side effects of locking.
        repo.lock_write()
        repo.unlock()
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.check_databases(t)

    def check_format(self, t):
        self.assertEqualDiff(
            self.format_string, # from scenario
            t.get('format').read())

    def assertHasNoKndx(self, t, knit_name):
        """Assert that knit_name has no index on t."""
        self.assertFalse(t.has(knit_name + '.kndx'))

    def assertHasNoKnit(self, t, knit_name):
        """Assert that knit_name has no knit content on t."""
        self.assertFalse(t.has(knit_name + '.knit'))

    def check_databases(self, t):
        """check knit content for a repository."""
        # check conversion worked
        self.assertHasNoKndx(t, 'inventory')
        self.assertHasNoKnit(t, 'inventory')
        self.assertHasNoKndx(t, 'revisions')
        self.assertHasNoKnit(t, 'revisions')
        self.assertHasNoKndx(t, 'signatures')
        self.assertHasNoKnit(t, 'signatures')
        self.assertFalse(t.has('knits'))
        # revision-indexes file-container directory
        self.assertEqual([],
            list(self.index_class(t, 'pack-names', None).iter_all_entries()))
        self.assertTrue(S_ISDIR(t.stat('packs').st_mode))
        self.assertTrue(S_ISDIR(t.stat('upload').st_mode))
        self.assertTrue(S_ISDIR(t.stat('indices').st_mode))
        self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode))
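        # i.e. a fresh pack repository has an empty pack-names index plus
        # four directories: completed packs ('packs'), in-flight uploads
        # ('upload'), their indices ('indices') and retired packs
        # ('obsolete_packs').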

    def test_shared_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        self.check_databases(t)

    def test_shared_no_tree_disk_layout(self):
        format = self.get_format()
        repo = self.make_repository('.', shared=True, format=format)
        repo.set_make_working_trees(False)
        t = repo.bzrdir.get_repository_transport(None)
        self.check_format(t)
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        # We should have a 'shared-storage' marker file.
        self.assertEqualDiff('', t.get('shared-storage').read())
        # We should have a marker for the no-working-trees flag.
        self.assertEqualDiff('', t.get('no-working-trees').read())
        # The marker should go when we toggle the setting.
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.check_databases(t)

    def test_adding_revision_creates_pack_indices(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(self.index_class(trans, 'pack-names', None).iter_all_entries()))
        tree.commit('foobarbaz')
        index = self.index_class(trans, 'pack-names', None)
        index_nodes = list(index.iter_all_entries())
        self.assertEqual(1, len(index_nodes))
        node = index_nodes[0]
        name = node[1][0]
        # the pack sizes should be listed in the index
        pack_value = node[2]
        sizes = [int(digits) for digits in pack_value.split(' ')]
        for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']):
            stat = trans.stat('indices/%s%s' % (name, suffix))
            self.assertEqual(size, stat.st_size)
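        # Each pack-names entry maps the pack's name to a value listing the
        # byte sizes of its four indices, in the order checked above:
        # revision (.rix), inventory (.iix), text (.tix), signature (.six).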

    def test_pulling_nothing_leads_to_no_new_names(self):
        format = self.get_format()
        tree1 = self.make_branch_and_tree('1', format=format)
        tree2 = self.make_branch_and_tree('2', format=format)
        tree1.branch.repository.fetch(tree2.branch.repository)
        trans = tree1.branch.repository.bzrdir.get_repository_transport(None)
        self.assertEqual([],
            list(self.index_class(trans, 'pack-names', None).iter_all_entries()))

    def test_commit_across_pack_shape_boundary_autopacks(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test run.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # insert some files in obsolete_packs which should be removed by pack.
        trans.put_bytes('obsolete_packs/foo', '123')
        trans.put_bytes('obsolete_packs/bar', '321')
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
        if tree.branch.repository._format.supports_chks:
            nb_files += 1 # .cix
        # We should have 10 x nb_files files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(10 * nb_files, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)
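        # Policy exercised above: autopack triggers on the 10th pack and
        # collapses all ten into one; the replaced packs and their indices
        # are moved aside into obsolete_packs rather than deleted in place.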

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = fakenfs.FakeNFSServer()
        server.setUp()
        self.addCleanup(server.tearDown)
        transport = get_transport(server.get_url())
        bzrdir = self.get_format().initialize_on_transport(transport)
        repo = bzrdir.create_repository()
        repo_transport = bzrdir.get_repository_transport(None)
        self.assertTrue(repo_transport.has('obsolete_packs'))
        # these files are in use by another client and typically can't be
        # deleted
        repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents')
        repo._pack_collection._clear_obsolete_packs()
        self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah'))

    def test_pack_after_two_commits_packs_everything(self):
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start')
        tree.commit('more work')
        tree.branch.repository.pack()
        # there should be 1 pack:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_layout(self):
        # Test that the ordering of revisions in pack repositories is
        # tip->ancestor.
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start', rev_id='1')
        tree.commit('more work', rev_id='2')
        tree.branch.repository.pack()
        tree.lock_read()
        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            if type(format.repository_format) is RepositoryFormatCHK1:
                # group_start, group_len, internal_start, internal_len
                pos = map(int, val.split())
            else:
                # eol_flag, start, len
                pos = int(val[1:].split()[0])
            if key == ('1',):
                pos_1 = pos
            else:
                pos_2 = pos
        self.assertTrue(pos_2 < pos_1, 'rev 1 came before rev 2 %s > %s'
                        % (pos_1, pos_2))

    def test_pack_repositories_support_multiple_write_locks(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        self.addCleanup(r1.unlock)
        r2.lock_write()
        r2.unlock()

    def _add_text(self, repo, fileid):
        """Add a text to the repository within a write group."""
        repo.texts.add_lines((fileid, 'samplerev+'+fileid), [],
            ['samplerev+'+fileid])
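        # The key is a (file_id, revision_id) pair; the empty parents list
        # means the text has no delta basis and is stored as a fulltext.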

    def test_concurrent_writers_merge_new_packs(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1.start_write_group()
                try:
                    r2.start_write_group()
                    try:
                        self._add_text(r1, 'fileidr1')
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1.abort_write_group()
                    raise
                # both r1 and r2 have open write groups with data in them
                # created while the other's write group was open.
                # Commit both which requires a merge to the pack-names.
                try:
                    r1.commit_write_group()
                except:
                    r1.abort_write_group()
                    r2.abort_write_group()
                    raise
                r2.commit_write_group()
                # tell r1 to reload from disk
                r1._pack_collection.reset()
                # Now both repositories should know about both names
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(),
                                 r2._pack_collection.names())
                self.assertEqual(2, len(r1._pack_collection.names()))
            finally:
                r2.unlock()
        finally:
            r1.unlock()
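        # pack-names behaves like a small database here: each commit of a
        # write group merges the writer's in-memory pack list with whatever
        # is on disk, so concurrent writers converge on the same set of names.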

    def test_concurrent_writer_second_preserves_dropping_a_pack(self):
        format = self.get_format()
        self.make_repository('.', shared=True, format=format)
        r1 = repository.Repository.open('.')
        r2 = repository.Repository.open('.')
        # add a pack to drop
        r1.lock_write()
        try:
            r1.start_write_group()
            try:
                self._add_text(r1, 'fileidr1')
            except:
                r1.abort_write_group()
                raise
            else:
                r1.commit_write_group()
            r1._pack_collection.ensure_loaded()
            name_to_drop = r1._pack_collection.all_packs()[0].name
        finally:
            r1.unlock()
        r1.lock_write()
        try:
            # access enough data to load the names list
            list(r1.all_revision_ids())
            r2.lock_write()
            try:
                # access enough data to load the names list
                list(r2.all_revision_ids())
                r1._pack_collection.ensure_loaded()
                try:
                    r2.start_write_group()
                    try:
                        # in r1, drop the pack
                        r1._pack_collection._remove_pack_from_memory(
                            r1._pack_collection.get_pack_by_name(name_to_drop))
                        # in r2, add a pack
                        self._add_text(r2, 'fileidr2')
                    except:
                        r2.abort_write_group()
                        raise
                except:
                    r1._pack_collection.reset()
                    raise
                # r1 has a changed names list, and r2 an open write group with
                # changes.
                # save r1, and then commit the r2 write group, which requires a
                # merge to the pack-names, which should not reinstate
                # name_to_drop
                try:
                    r1._pack_collection._save_pack_names()
                    r1._pack_collection.reset()
                except:
                    r2.abort_write_group()
                    raise
                try:
                    r2.commit_write_group()
                except:
                    r2.abort_write_group()
                    raise
                # Now both repositories should know about just one name.
                r1._pack_collection.ensure_loaded()
                r2._pack_collection.ensure_loaded()
                self.assertEqual(r1._pack_collection.names(),
                                 r2._pack_collection.names())
                self.assertEqual(1, len(r1._pack_collection.names()))
                self.assertFalse(name_to_drop in r1._pack_collection.names())
            finally:
                r2.unlock()
        finally:
            r1.unlock()

    def test_concurrent_pack_triggers_reload(self):
        # create 2 packs, which we will then collapse
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        try:
            rev1 = tree.commit('one')
            rev2 = tree.commit('two')
            r2 = repository.Repository.open('tree')
            r2.lock_read()
            try:
                # Now r2 has read the pack-names file, but will need to reload
                # it after r1 has repacked
                tree.branch.repository.pack()
                self.assertEqual({rev2: (rev1,)}, r2.get_parent_map([rev2]))
            finally:
                r2.unlock()
        finally:
            tree.unlock()

    def test_concurrent_pack_during_get_record_reloads(self):
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        try:
            rev1 = tree.commit('one')
            rev2 = tree.commit('two')
            keys = [(rev1,), (rev2,)]
            r2 = repository.Repository.open('tree')
            r2.lock_read()
            try:
                # At this point, we will start grabbing a record stream, and
                # trigger a repack mid-way
                packed = False
                result = {}
                record_stream = r2.revisions.get_record_stream(keys,
                    'unordered', False)
                for record in record_stream:
                    result[record.key] = record
                    if not packed:
                        tree.branch.repository.pack()
                        packed = True
                # The first record will be found in the original location, but
                # after the pack, we have to reload to find the next record
                self.assertEqual(sorted(keys), sorted(result.keys()))
            finally:
                r2.unlock()
        finally:
            tree.unlock()
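        # The repack moves both revisions into a new pack while the stream is
        # being consumed; the stream is expected to reload pack-names and
        # retry instead of raising, hence both keys must come back.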

    def test_lock_write_does_not_physically_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.addCleanup(repo.unlock)
        self.assertFalse(repo.get_physical_lock_status())

    def prepare_for_break_lock(self):
        # Setup the global ui factory state so that a break-lock method call
        # will find usable input in the input stream.
        old_factory = ui.ui_factory
        def restoreFactory():
            ui.ui_factory = old_factory
        self.addCleanup(restoreFactory)
        ui.ui_factory = ui.SilentUIFactory()
        ui.ui_factory.stdin = StringIO("y\n")

    def test_break_lock_breaks_physical_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        repo.control_files.leave_in_place()
        repo.unlock()
        repo2 = repository.Repository.open('.')
        self.assertTrue(repo.get_physical_lock_status())
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertFalse(repo.get_physical_lock_status())

    def test_broken_physical_locks_error_on__unlock_names_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo._pack_collection.lock_names()
        self.assertTrue(repo.get_physical_lock_status())
        repo2 = repository.Repository.open('.')
        self.prepare_for_break_lock()
        repo2.break_lock()
        self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names)

    def test_fetch_without_find_ghosts_ignores_ghosts(self):
        # we want two repositories at this point:
        # one with a revision that is a ghost in the other
        # repository.
        # 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
        # 'references' is present in both repositories, and 'tip' is present
        # just in has_ghost.
        # has_ghost       missing_ghost
        #------------------------------
        # 'ghost'             -
        # 'references'    'references'
        # 'tip'               -
        # In this test we fetch 'tip' which should not fetch 'ghost'
        has_ghost = self.make_repository('has_ghost', format=self.get_format())
        missing_ghost = self.make_repository('missing_ghost',
            format=self.get_format())

        def add_commit(repo, revision_id, parent_ids):
            repo.lock_write()
            repo.start_write_group()
            inv = inventory.Inventory(revision_id=revision_id)
            inv.root.revision = revision_id
            root_id = inv.root.file_id
            sha1 = repo.add_inventory(revision_id, inv, [])
            repo.texts.add_lines((root_id, revision_id), [], [])
            rev = _mod_revision.Revision(timestamp=0,
                                         timezone=None,
                                         committer="Foo Bar <foo@example.com>",
                                         message="Message",
                                         inventory_sha1=sha1,
                                         revision_id=revision_id)
            rev.parent_ids = parent_ids
            repo.add_revision(revision_id, rev)
            repo.commit_write_group()
            repo.unlock()
        add_commit(has_ghost, 'ghost', [])
        add_commit(has_ghost, 'references', ['ghost'])
        add_commit(missing_ghost, 'references', ['ghost'])
        add_commit(has_ghost, 'tip', ['references'])
        missing_ghost.fetch(has_ghost, 'tip')
        # missing ghost now has tip and not ghost.
        rev = missing_ghost.get_revision('tip')
        inv = missing_ghost.get_inventory('tip')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_revision, 'ghost')
        self.assertRaises(errors.NoSuchRevision,
            missing_ghost.get_inventory, 'ghost')

    def make_write_ready_repo(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        repo.start_write_group()
        return repo

    def test_missing_inventories_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        if not getattr(repo.inventories._index, '_missing_compression_parents',
                       None):
            raise TestSkipped("No missing compression parents")
        repo.inventories._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        repo.abort_write_group()
        repo.unlock()

    def test_missing_revisions_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        if not getattr(repo.inventories._index, '_missing_compression_parents',
                       None):
            raise TestSkipped("No missing compression parents")
        repo.revisions._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        repo.abort_write_group()
        repo.unlock()

    def test_missing_signatures_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        if not getattr(repo.inventories._index, '_missing_compression_parents',
                       None):
            raise TestSkipped("No missing compression parents")
        repo.signatures._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        repo.abort_write_group()
        repo.unlock()

    def test_missing_text_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('some', 'junk')
        if not getattr(repo.inventories._index, '_missing_compression_parents',
                       None):
            raise TestSkipped("No missing compression parents")
        repo.texts._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        repo.abort_write_group()
        repo.unlock()

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
            repo._format.supports_external_lookups)

    def test_abort_write_group_does_not_raise_when_suppressed(self):
        """Similar to per_repository.test_write_group's test of the same name.

        Also requires that the exception is logged.
        """
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        # Damage the repository on the filesystem
        self.get_transport('').rename('repo', 'foo')
        # abort_write_group will not raise an error
        self.assertEqual(None, repo.abort_write_group(suppress_errors=True))
        # But it does log an error
        log_file = self._get_log(keep_log_file=True)
        self.assertContainsRe(log_file, 'abort_write_group failed')
        self.assertContainsRe(log_file, r'INFO bzr: ERROR \(ignored\):')
        if token is not None:
            repo.leave_lock_in_place()

    def test_abort_write_group_does_raise_when_not_suppressed(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        # Damage the repository on the filesystem
        self.get_transport('').rename('repo', 'foo')
        # without suppress_errors, abort_write_group will raise the error
        self.assertRaises(Exception, repo.abort_write_group)
        if token is not None:
            repo.leave_lock_in_place()

    def test_suspend_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        expected_pack_name = wg_tokens[0] + '.pack'
        upload_transport = repo._pack_collection._upload_transport
        limbo_files = upload_transport.list_dir('')
        self.assertTrue(expected_pack_name in limbo_files, limbo_files)
        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
        self.assertEqual(wg_tokens[0], md5.hexdigest())
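        # The resume token is thus both the pack's name in the upload
        # directory and the md5 of its contents, so a resuming process can
        # in principle verify the bytes it finds there.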

    def test_resume_write_group_then_abort(self):
        # Create a repo, start a write group, insert some data, suspend.
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        # Resume the write group, then abort it; no pack files should be
        # left behind in either the upload or pack directories.
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.abort_write_group()
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        self.assertEqual(
            [], same_repo._pack_collection._pack_transport.list_dir(''))

    def test_resume_malformed_token(self):
        self.vfs_transport_factory = memory.MemoryServer
        # Make a repository with a suspended write group
        repo = self.make_repository('repo')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Make a new repository
        new_repo = self.make_repository('new_repo')
        token = new_repo.lock_write()
        self.addCleanup(new_repo.unlock)
        hacked_wg_token = (
            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
        self.assertRaises(
            errors.UnresumableWriteGroup,
            new_repo.resume_write_group, [hacked_wg_token])


class TestPackRepositoryStacking(TestCaseWithTransport):

    """Tests for stacking pack repositories"""

    def setUp(self):
        if not self.format_supports_external_lookups:
            raise TestNotApplicable("%r doesn't support stacking"
                % (self.format_name,))
        super(TestPackRepositoryStacking, self).setUp()

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_stack_checks_rich_root_compatibility(self):
        # early versions of the packing code relied on pack internals to
        # stack, but the current version should be able to stack on any
        # format.
        #
        # TODO: Possibly this should be run per-repository-format and raise
        # TestNotApplicable on formats that don't support stacking. -- mbp
        repo = self.make_repository('repo', format=self.get_format())
        if repo.supports_rich_root():
            # can only stack on repositories that have compatible internal
            # metadata
            if getattr(repo._format, 'supports_tree_reference', False):
                if repo._format.supports_chks:
                    matching_format_name = 'development6-rich-root'
                else:
                    matching_format_name = 'pack-0.92-subtree'
            else:
                matching_format_name = 'rich-root-pack'
            mismatching_format_name = 'pack-0.92'
        else:
            # We don't have a non-rich-root CHK format.
            if repo._format.supports_chks:
                raise AssertionError("no non-rich-root CHK formats known")
            else:
                matching_format_name = 'pack-0.92'
            mismatching_format_name = 'pack-0.92-subtree'
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'.*Repository.*/repo/.*\n'
            r'different rich-root support')
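        # The check runs eagerly in add_fallback_repository: a fallback with
        # different rich-root support is rejected up front, since the two
        # formats model the tree root differently and data could not flow
        # between them unchanged.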

    def test_stack_checks_serializers_compatibility(self):
        repo = self.make_repository('repo', format=self.get_format())
        if getattr(repo._format, 'supports_tree_reference', False):
            # can only stack on repositories that have compatible internal
            # metadata
            if repo._format.supports_chks:
                # No CHK subtree formats in bzr.dev, so this doesn't execute.
                matching_format_name = 'development6-subtree'
            else:
                matching_format_name = 'pack-0.92-subtree'
            mismatching_format_name = 'rich-root-pack'
        else:
            if repo.supports_rich_root():
                matching_format_name = 'rich-root-pack'
                mismatching_format_name = 'pack-0.92-subtree'
            else:
                raise TestNotApplicable('No formats use non-v5 serializer'
                    ' without having rich-root also set')
        base = self.make_repository('base', format=matching_format_name)
        repo.add_fallback_repository(base)
        # you can't stack on something with incompatible data
        bad_repo = self.make_repository('mismatch',
            format=mismatching_format_name)
        e = self.assertRaises(errors.IncompatibleRepositories,
            repo.add_fallback_repository, bad_repo)
        self.assertContainsRe(str(e),
            r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n'
            r'.*Repository.*/repo/.*\n'
            r'different serializers')

    def test_adding_pack_does_not_record_pack_names_from_other_repositories(self):
        base = self.make_branch_and_tree('base', format=self.get_format())
        base.commit('foo')
        referencing = self.make_branch_and_tree('repo', format=self.get_format())
        referencing.branch.repository.add_fallback_repository(base.branch.repository)
        referencing.commit('bar')
        new_instance = referencing.bzrdir.open_repository()
        new_instance.lock_read()
        self.addCleanup(new_instance.unlock)
        new_instance._pack_collection.ensure_loaded()
        self.assertEqual(1, len(new_instance._pack_collection.all_packs()))

    def test_autopack_only_considers_main_repo_packs(self):
        format = self.get_format()
        base = self.make_branch_and_tree('base', format=format)
        base.commit('foo')
        tree = self.make_branch_and_tree('repo', format=format)
        tree.branch.repository.add_fallback_repository(base.branch.repository)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        # This test could be a little cheaper by replacing the packs
        # attribute on the repository to allow a different pack distribution
        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test run.
        for x in range(9):
            tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
        if tree.branch.repository._format.supports_chks:
            nb_files += 1 # .cix
        # We should have 10 x nb_files files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(10 * nb_files, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)


class TestSmartServerAutopack(TestCaseWithTransport):

    def setUp(self):
        super(TestSmartServerAutopack, self).setUp()
        # Create a smart server that publishes whatever the backing VFS server
        # does.
        self.smart_server = server.SmartTCPServer_for_testing()
        self.smart_server.setUp(self.get_server())
        self.addCleanup(self.smart_server.tearDown)
        # Log all HPSS calls into self.hpss_calls.
        client._SmartClient.hooks.install_named_hook(
            'call', self.capture_hpss_call, None)
        self.hpss_calls = []

    def capture_hpss_call(self, params):
        self.hpss_calls.append(params.method)

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_autopack_or_streaming_rpc_is_used_when_using_hpss(self):
        # Make local and remote repos
        format = self.get_format()
        tree = self.make_branch_and_tree('local', format=format)
        self.make_branch_and_tree('remote', format=format)
        remote_branch_url = self.smart_server.get_url() + 'remote'
        remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
        # Make 9 local revisions, and push them one at a time to the remote
        # repo to produce 9 pack files.
        for x in range(9):
            tree.commit('commit %s' % x)
            tree.branch.push(remote_branch)
        # Make one more push to trigger an autopack
        self.hpss_calls = []
        tree.commit('commit triggering pack')
        tree.branch.push(remote_branch)
        autopack_calls = len([call for call in self.hpss_calls if call ==
            'PackRepository.autopack'])
        streaming_calls = len([call for call in self.hpss_calls if call ==
            'Repository.insert_stream'])
        if autopack_calls:
            # Non streaming server
            self.assertEqual(1, autopack_calls)
            self.assertEqual(0, streaming_calls)
        else:
            # Streaming was used, which autopacks on the remote end.
            self.assertEqual(0, autopack_calls)
            # NB: The 2 calls are because of the sanity check that the server
            # supports the verb (see remote.py:RemoteSink.insert_stream for
            # details).
            self.assertEqual(2, streaming_calls)
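        # Either code path is acceptable: an old-style server is asked to
        # autopack explicitly via the PackRepository.autopack RPC, while a
        # streaming push lets the server run its own autopack as part of
        # handling Repository.insert_stream.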


def load_tests(basic_tests, module, loader):
    # these give the bzrdir canned format name, and the repository on-disk
    # format string
    scenarios_params = [
        dict(format_name='pack-0.92',
             format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
             format_supports_external_lookups=False,
             index_class=GraphIndex),
        dict(format_name='pack-0.92-subtree',
             format_string="Bazaar pack repository format 1 "
             "with subtree support (needs bzr 0.92)\n",
             format_supports_external_lookups=False,
             index_class=GraphIndex),
        dict(format_name='1.6',
             format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.6.1-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
             "(bzr 1.6.1)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.9',
             format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='1.9-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
             "(bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='development6-rich-root',
             format_string='Bazaar development format - group compression '
             'and chk inventory (needs bzr.dev from 1.14)\n',
             format_supports_external_lookups=False,
             index_class=BTreeGraphIndex),
        ]
    # name of the scenario is the format name
    scenarios = [(s['format_name'], s) for s in scenarios_params]
    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())