        """Packs do not need ordered data retrieval."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        self.assertEqual('unordered', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Packs reuse deltas."""
        format = self.get_format()
        repo = self.make_repository('.', format=format)
        if isinstance(format.repository_format, RepositoryFormat2a):
            # TODO: This is currently a workaround. CHK format repositories
            # ignore the 'deltas' flag, but during conversions, we can't
            # do unordered delta fetches. Remove this clause once we
            # improve the inter-format fetching.
            self.assertEqual(False, repo._format._fetch_uses_deltas)
        else:
            self.assertEqual(True, repo._format._fetch_uses_deltas)

    def test_disk_layout(self):
        format = self.get_format()
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)

    def test_commit_write_group_returns_new_pack_names(self):
        # This test doesn't need real disk.
        self.vfs_transport_factory = tests.MemoryServer
        format = self.get_format()
        repo = self.make_repository('foo', format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        # All current pack repository styles autopack at 10 revisions; and
        # autopack as well as regular commit write group needs to return
        # the new pack name. Looping is a little ugly, but we don't have a
        # clean way to test both the autopack logic and the normal code
        # path without doing this loop.
        for pos in range(10):
            revid = str(pos)
            repo.start_write_group()
            try:
                inv = inventory.Inventory(revision_id=revid)
                inv.root.revision = revid
                repo.texts.add_lines((inv.root.file_id, revid), [], [])
                rev = _mod_revision.Revision(timestamp=0, timezone=None,
                    committer="Foo Bar <foo@example.com>", message="Message",
                    revision_id=revid)
                rev.parent_ids = ()
                repo.add_revision(revid, rev, inv=inv)
            except:
                repo.abort_write_group()
                raise
            else:
                old_names = repo._pack_collection._names.keys()
                result = repo.commit_write_group()
                cur_names = repo._pack_collection._names.keys()
                new_names = list(set(cur_names) - set(old_names))
                self.assertEqual(new_names, result)

    def test_fail_obsolete_deletion(self):
        # failing to delete obsolete packs is not fatal
        format = self.get_format()
        server = fakenfs.FakeNFSServer()
        self.start_server(server)
        transport = get_transport(server.get_url())
        bzrdir = self.get_format().initialize_on_transport(transport)
        repo = bzrdir.create_repository()
        self.assertEqual(1, len(list(index.iter_all_entries())))
        self.assertEqual(2, len(tree.branch.repository.all_revision_ids()))

    def test_pack_preserves_all_inventories(self):
        # This is related to bug:
        # https://bugs.launchpad.net/bzr/+bug/412198
        # Stacked repositories need to keep the inventory for parents, even
        # after a pack operation. However, that is harder to test than just
        # testing that all inventory texts are preserved.
        format = self.get_format()
        builder = self.make_branch_builder('source', format=format)
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        builder.build_snapshot('B-id', None, [
            ('add', ('file', 'file-id', 'file', 'B content\n'))])
        builder.build_snapshot('C-id', None, [
            ('modify', ('file-id', 'C content\n'))])
        builder.finish_series()
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo = self.make_repository('repo', shared=True, format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.fetch(b.repository, revision_id='B-id')
        inv = b.repository.iter_inventories(['C-id']).next()
        repo.start_write_group()
        repo.add_inventory('C-id', inv, ['B-id'])
        repo.commit_write_group()
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
                         sorted(repo.inventories.keys()))
        repo.pack()
        self.assertEqual([('A-id',), ('B-id',), ('C-id',)],
                         sorted(repo.inventories.keys()))
        # Content should be preserved as well
        self.assertEqual(inv, repo.iter_inventories(['C-id']).next())

    def test_pack_layout(self):
        # Test that the ordering of revisions in pack repositories is
        # tip->ancestor.
        format = self.get_format()
        tree = self.make_branch_and_tree('.', format=format)
        trans = tree.branch.repository.bzrdir.get_repository_transport(None)
        tree.commit('start', rev_id='1')
        tree.commit('more work', rev_id='2')
        tree.branch.repository.pack()
        tree.lock_read()
        self.addCleanup(tree.unlock)
        pack = tree.branch.repository._pack_collection.get_pack_by_name(
            tree.branch.repository._pack_collection.names()[0])
        # revision access tends to be tip->ancestor, so ordering that way on
        # disk is a good idea.
        for _1, key, val, refs in pack.revision_index.iter_all_entries():
            if type(format.repository_format) is RepositoryFormat2a:
                # group_start, group_len, internal_start, internal_len
                pos = map(int, val.split())
            else:
                # eol_flag, start, len
                pos = int(val[1:].split()[0])
            if key == ('1',):
                pos_1 = pos
            else:
                pos_2 = pos
        self.assertTrue(pos_2 < pos_1, 'rev 1 came before rev 2 %s > %s'
                        % (pos_1, pos_2))

    def test_pack_repositories_support_multiple_write_locks(self):
        format = self.get_format()

    def test_concurrent_pack_during_autopack(self):
        tree = self.make_branch_and_tree('tree')
        tree.lock_write()
        self.addCleanup(tree.unlock)
        for i in range(9):
            tree.commit('rev %d' % (i,))
        r2 = repository.Repository.open('tree')
        r2.lock_write()
        self.addCleanup(r2.unlock)
        # Monkey patch so that pack occurs while the other repo is
        # autopacking. This is slightly bad, but all current pack
        # repository implementations have a _pack_collection, and we
        # test that it gets triggered. So if a future format changes
        # things, the test will fail rather than succeed accidentally.
        autopack_count = [0]
        r1 = tree.branch.repository
        orig = r1._pack_collection.pack_distribution
        def trigger_during_auto(*args, **kwargs):
            ret = orig(*args, **kwargs)
            if not autopack_count[0]:
                r2.pack()
            autopack_count[0] += 1
            return ret
        r1._pack_collection.pack_distribution = trigger_during_auto
        tree.commit('autopack-rev')
        # This triggers 2 autopacks. The first one causes r2.pack() to
        # fire, but r2 doesn't see the new pack file yet. The
        # autopack restarts and sees there are 2 files and there
        # should be only 1 for 10 commits. So it goes ahead and
        # finishes autopacking.
        self.assertEqual([2], autopack_count)

    def test_lock_write_does_not_physically_lock(self):
        repo = self.make_repository('.', format=self.get_format())
        repo.lock_write()
        self.assertRaises(errors.NoSuchRevision,
                          missing_ghost.get_inventory, 'ghost')

    def make_write_ready_repo(self):
        format = self.get_format()
        if isinstance(format.repository_format, RepositoryFormat2a):
            raise TestNotApplicable("No missing compression parents")
        repo = self.make_repository('.', format=format)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        self.addCleanup(repo.abort_write_group)
        return repo
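
    # The tests below each poison one versioned-file index with a missing
    # compression parent; commit_write_group must refuse to complete (and
    # keep refusing on a retry) while the parent is still absent.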

    def test_missing_inventories_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.inventories._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_revisions_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.revisions._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_signatures_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('junk',)
        repo.signatures._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_missing_text_compression_parent_prevents_commit(self):
        repo = self.make_write_ready_repo()
        key = ('some', 'junk')
        repo.texts._index._missing_compression_parents.add(key)
        self.assertRaises(errors.BzrCheckError, repo.commit_write_group)
        e = self.assertRaises(errors.BzrCheckError, repo.commit_write_group)

    def test_supports_external_lookups(self):
        repo = self.make_repository('.', format=self.get_format())
        self.assertEqual(self.format_supports_external_lookups,
                         repo._format.supports_external_lookups)
        self.assertRaises(Exception, repo.abort_write_group)
        if token is not None:
            repo.leave_lock_in_place()

    def test_suspend_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        repo.texts.add_lines(('file-id', 'revid'), (), ['lines'])
        wg_tokens = repo.suspend_write_group()
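        # The token names the suspended pack: it is the md5 of the pack's
        # contents, and every limbo file in upload/ shares that stem (this
        # is asserted at the end of the test).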
        expected_pack_name = wg_tokens[0] + '.pack'
        expected_names = [wg_tokens[0] + ext for ext in
                          ('.rix', '.iix', '.tix', '.six')]
        if repo.chk_bytes is not None:
            expected_names.append(wg_tokens[0] + '.cix')
        expected_names.append(expected_pack_name)
        upload_transport = repo._pack_collection._upload_transport
        limbo_files = upload_transport.list_dir('')
        self.assertEqual(sorted(expected_names), sorted(limbo_files))
        md5 = osutils.md5(upload_transport.get_bytes(expected_pack_name))
        self.assertEqual(wg_tokens[0], md5.hexdigest())

    def test_resume_chk_bytes(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        if repo.chk_bytes is None:
            raise TestNotApplicable('no chk_bytes for this repository')
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text = 'a bit of text\n'
        key = ('sha1:' + osutils.sha_string(text),)
        repo.chk_bytes.add_lines(key, (), [text])
        wg_tokens = repo.suspend_write_group()
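        # A fresh repository object opened on the same location should be
        # able to resume the group and see the suspended chk text.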
        same_repo = repo.bzrdir.open_repository()
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        self.assertEqual([key], list(same_repo.chk_bytes.keys()))
        self.assertEqual(
            text, same_repo.chk_bytes.get_record_stream([key],
                'unordered', True).next().get_bytes_as('fulltext'))
        same_repo.abort_write_group()
        self.assertEqual([], list(same_repo.chk_bytes.keys()))

    def test_resume_write_group_then_abort(self):
        # Create a repo, start a write group, insert some data, suspend.
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.abort_write_group()
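        # Aborting the resumed group must leave no files behind, either in
        # limbo (upload/) or in the final pack directory.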
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        self.assertEqual(
            [], same_repo._pack_collection._pack_transport.list_dir(''))

    def test_commit_resumed_write_group(self):
        self.vfs_transport_factory = memory.MemoryServer
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Get a fresh repository object for the repo on the filesystem.
        same_repo = repo.bzrdir.open_repository()
        same_repo.lock_write()
        self.addCleanup(same_repo.unlock)
        same_repo.resume_write_group(wg_tokens)
        same_repo.commit_write_group()
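        # Committing moves the pack out of upload/ into its final location
        # and writes the index files, all named after the write group token.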
        expected_pack_name = wg_tokens[0] + '.pack'
        expected_names = [wg_tokens[0] + ext for ext in
                          ('.rix', '.iix', '.tix', '.six')]
        if repo.chk_bytes is not None:
            expected_names.append(wg_tokens[0] + '.cix')
        self.assertEqual(
            [], same_repo._pack_collection._upload_transport.list_dir(''))
        index_names = repo._pack_collection._index_transport.list_dir('')
        self.assertEqual(sorted(expected_names), sorted(index_names))
        pack_names = repo._pack_collection._pack_transport.list_dir('')
        self.assertEqual([expected_pack_name], pack_names)

    def test_resume_malformed_token(self):
        self.vfs_transport_factory = memory.MemoryServer
        # Make a repository with a suspended write group
        repo = self.make_repository('repo', format=self.get_format())
        token = repo.lock_write()
        self.addCleanup(repo.unlock)
        repo.start_write_group()
        text_key = ('file-id', 'revid')
        repo.texts.add_lines(text_key, (), ['lines'])
        wg_tokens = repo.suspend_write_group()
        # Make a new repository
        new_repo = self.make_repository('new_repo', format=self.get_format())
        token = new_repo.lock_write()
        self.addCleanup(new_repo.unlock)
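        # A token that smuggles path separators must be rejected, rather
        # than letting a write group from a different repository be resumed.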
        hacked_wg_token = (
            '../../../../repo/.bzr/repository/upload/' + wg_tokens[0])
        self.assertRaises(
            errors.UnresumableWriteGroup,
            new_repo.resume_write_group, [hacked_wg_token])


class TestPackRepositoryStacking(TestCaseWithTransport):

        # and max packs policy - so we are checking the policy is honoured
        # in the test. But for now 11 commits is not a big deal in a single
        # test.
        local_tree = tree.branch.create_checkout('local')
        for x in range(9):
            local_tree.commit('commit %s' % x)
        # there should be 9 packs:
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(9, len(list(index.iter_all_entries())))
        # committing one more should coalesce to 1 of 10.
        local_tree.commit('commit triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(1, len(list(index.iter_all_entries())))
        # packing should not damage data
        tree = tree.bzrdir.open_workingtree()
        check_result = tree.branch.repository.check(
            [tree.branch.last_revision()])
        nb_files = 5 # .pack, .rix, .iix, .tix, .six
        if tree.branch.repository._format.supports_chks:
            nb_files += 1 # .cix
        # We should have 10 x nb_files files in the obsolete_packs directory.
        obsolete_files = list(trans.list_dir('obsolete_packs'))
        self.assertFalse('foo' in obsolete_files)
        self.assertFalse('bar' in obsolete_files)
        self.assertEqual(10 * nb_files, len(obsolete_files))
        # XXX: Todo check packs obsoleted correctly - old packs and indices
        # in the obsolete_packs directory.
        large_pack_name = list(index.iter_all_entries())[0][1][0]
        # finally, committing again should not touch the large pack.
        local_tree.commit('commit not triggering pack')
        index = self.index_class(trans, 'pack-names', None)
        self.assertEqual(2, len(list(index.iter_all_entries())))
        pack_names = [node[1][0] for node in index.iter_all_entries()]
        self.assertTrue(large_pack_name in pack_names)


class TestKeyDependencies(TestCaseWithTransport):

    def get_format(self):
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def create_source_and_target(self):
        builder = self.make_branch_builder('source', format=self.get_format())
        builder.start_series()
        builder.build_snapshot('A-id', None, [
            ('add', ('', 'root-id', 'directory', None))])
        builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
        builder.finish_series()
        repo = self.make_repository('target', format=self.get_format())
        b = builder.get_branch()
        b.lock_read()
        self.addCleanup(b.unlock)
        repo.lock_write()
        self.addCleanup(repo.unlock)
        return b.repository, repo
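
    # 'B-id' lists 'ghost-id' as a parent, so inserting its revision record
    # creates an entry in the _key_dependencies map; each test below checks
    # that the map is emptied however the write group is finished.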

    def test_key_dependencies_cleared_on_abort(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.abort_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_suspend(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            stream = source_repo.revisions.get_record_stream([('B-id',)],
                'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.suspend_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))

    def test_key_dependencies_cleared_on_commit(self):
        source_repo, target_repo = self.create_source_and_target()
        target_repo.start_write_group()
        try:
            # Copy all texts, inventories, and chks so that nothing is missing
            # for revision B-id.
            for vf_name in ['texts', 'chk_bytes', 'inventories']:
                source_vf = getattr(source_repo, vf_name, None)
                if source_vf is None:
                    continue
                target_vf = getattr(target_repo, vf_name)
                stream = source_vf.get_record_stream(
                    source_vf.keys(), 'unordered', True)
                target_vf.insert_record_stream(stream)
            # Copy just revision B-id
            stream = source_repo.revisions.get_record_stream(
                [('B-id',)], 'unordered', True)
            target_repo.revisions.insert_record_stream(stream)
            key_refs = target_repo.revisions._index._key_dependencies
            self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
        finally:
            target_repo.commit_write_group()
        self.assertEqual([], sorted(key_refs.get_referrers()))


class TestSmartServerAutopack(TestCaseWithTransport):

    def setUp(self):
        self.hpss_calls = []
        tree.commit('commit triggering pack')
        tree.branch.push(remote_branch)
        autopack_calls = len([call for call in self.hpss_calls if call ==
            'PackRepository.autopack'])
        streaming_calls = len([call for call in self.hpss_calls if call in
            ('Repository.insert_stream', 'Repository.insert_stream_1.19')])
        if not streaming_calls:
            # Non streaming server
            self.assertEqual(1, autopack_calls)
            self.assertEqual(0, streaming_calls)
        else:
            # Streaming was used, which autopacks on the remote end.
            self.assertEqual(0, autopack_calls)
            # NB: The 2 calls are because of the sanity check that the server
            # supports the verb (see remote.py:RemoteSink.insert_stream for
            # details).
            self.assertEqual(2, streaming_calls)


def load_tests(basic_tests, module, loader):
    # these give the bzrdir canned format name, and the repository on-disk
    # format string
    scenarios_params = [
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='2a',
             format_string="Bazaar repository format 2a "
                 "(needs bzr 1.16 or later)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        ]
    # name of the scenario is the format name
    scenarios = [(s['format_name'], s) for s in scenarios_params]
    return tests.multiply_tests(basic_tests, scenarios, loader.suiteClass())
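
# multiply_tests runs each test in basic_tests once per scenario, so every
# pack format listed above is exercised by the same test methods.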