    def test_find_format_with_features(self):
        tree = self.make_branch_and_tree('.', format='2a')
        tree.branch.repository.update_feature_flags({b"name": b"necessity"})
        found_format = bzrrepository.RepositoryFormatMetaDir.find_format(
            tree.controldir)
        self.assertIsInstance(
            found_format, bzrrepository.RepositoryFormatMetaDir)
        self.assertEqual(found_format.features.get(b"name"), b"necessity")
        self.assertRaises(
            bzrdir.MissingFeature, found_format.check_support_status, True)
        self.addCleanup(
            bzrrepository.RepositoryFormatMetaDir.unregister_feature, b"name")
        bzrrepository.RepositoryFormatMetaDir.register_feature(b"name")
        found_format.check_support_status(True)
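
    # Hedged aside, not from the original suite: update_feature_flags()
    # stamps the feature into the on-disk format file, find_format() reads
    # it back, and check_support_status() raises MissingFeature until the
    # feature is registered; the addCleanup() above unregisters it again so
    # later tests see a clean registry.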

    def test_attribute__fetch_order(self):
        """Knits need topological data insertion."""
        repo = self.make_repository(
            '.', format=controldir.format_registry.get('knit')())
        self.assertEqual('topological', repo._format._fetch_order)

    def test_attribute__fetch_uses_deltas(self):
        """Knits reuse deltas."""
        repo = self.make_repository(
            '.', format=controldir.format_registry.get('knit')())
        self.assertEqual(True, repo._format._fetch_uses_deltas)
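
    # Both attributes drive fetch behaviour: knit deltas are stored against
    # earlier texts, so records have to arrive parents-first (hence the
    # 'topological' _fetch_order), and the stored deltas can be reused
    # verbatim (_fetch_uses_deltas).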

    def test_disk_layout(self):

        self.assertTrue(S_ISDIR(t.stat('knits').st_mode))
        self.check_knits(t)
        # Check per-file knits.
        control.create_branch()
        tree = control.create_workingtree()
        tree.add(['foo'], [b'Nasty-IdC:'], ['file'])
        tree.put_file_bytes_non_atomic('foo', b'')
        tree.commit('1st post', rev_id=b'foo')
        self.assertHasKnit(t, 'knits/e8/%254easty-%2549d%2543%253a',
                           b'\nfoo fulltext 0 81 :')

    def assertHasKnit(self, t, knit_name, extra_content=b''):
        """Assert that knit_name exists on t."""
        self.assertEqualDiff(b'# bzr knit index 8\n' + extra_content,
                             t.get(knit_name + '.kndx').read())
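
    # Every .kndx file begins with the fixed header '# bzr knit index 8';
    # extra_content is whatever index records the caller expects after it.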

    def check_knits(self, t):

        # empty weaves directory
        # a 'shared-storage' marker file.
        t = control.get_repository_transport(None)
        with t.get('format') as f:
            self.assertEqualDiff(b'Bazaar-NG Knit Repository Format 1',
                                 f.read())
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.assertEqualDiff(b'', t.get('shared-storage').read())
        self.assertTrue(S_ISDIR(t.stat('knits').st_mode))
        self.check_knits(t)

    def test_shared_no_tree_disk_layout(self):
        control = bzrdir.BzrDirMetaFormat1().initialize(self.get_url())
        repo = knitrepo.RepositoryFormatKnit1().initialize(
            control, shared=True)
        repo.set_make_working_trees(False)
        # format 'Bazaar-NG Knit Repository Format 1'
        # empty weaves directory
        # a 'shared-storage' marker file.
        t = control.get_repository_transport(None)
        with t.get('format') as f:
            self.assertEqualDiff(b'Bazaar-NG Knit Repository Format 1',
                                 f.read())
        # XXX: no locks left when unlocked at the moment
        # self.assertEqualDiff('', t.get('lock').read())
        self.assertEqualDiff(b'', t.get('shared-storage').read())
        self.assertEqualDiff(b'', t.get('no-working-trees').read())
        repo.set_make_working_trees(True)
        self.assertFalse(t.has('no-working-trees'))
        self.assertTrue(S_ISDIR(t.stat('knits').st_mode))
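
    # Both settings are carried by empty marker files: 'shared-storage'
    # (the repository serves multiple branches) and 'no-working-trees'
    # (checkouts should not build trees). Presence alone is the flag, so
    # set_make_working_trees(True) simply deletes the marker.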

        the whole inventory. So we grab the one from the expected text, which
        is valid when the api is not being abused.
        """
        repo = self.make_repository(
            '.', format=controldir.format_registry.get('knit')())
        inv_xml = b'<inventory format="5">\n</inventory>\n'
        inv = repo._deserialise_inventory(b'test-rev-id', [inv_xml])
        self.assertEqual(b'test-rev-id', inv.root.revision)

    def test_deserialise_uses_global_revision_id(self):
        """If it is set, then we re-use the global revision id"""
        repo = self.make_repository(
            '.', format=controldir.format_registry.get('knit')())
        inv_xml = (b'<inventory format="5" revision_id="other-rev-id">\n'
                   b'</inventory>\n')
        # Arguably, the deserialise_inventory should detect a mismatch, and
        # raise an error, rather than silently using one revision_id over the
        # other.
        self.assertRaises(AssertionError, repo._deserialise_inventory,
                          b'test-rev-id', [inv_xml])
        inv = repo._deserialise_inventory(b'other-rev-id', [inv_xml])
        self.assertEqual(b'other-rev-id', inv.root.revision)

    def test_supports_external_lookups(self):
        repo = self.make_repository(
            '.', format=controldir.format_registry.get('knit')())
        self.assertFalse(repo._format.supports_external_lookups)
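
    # External lookups (i.e. stacking on a fallback repository) only
    # arrived with later pack-based formats, so plain knit repositories
    # are expected to answer False here.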

        repo = self.make_repository('.')
        # hack dummies to look like repo somewhat.
        dummy_a._serializer = repo._serializer
        dummy_a._format.supports_tree_reference = (
            repo._format.supports_tree_reference)
        dummy_a._format.rich_root_data = repo._format.rich_root_data
        dummy_a._format.supports_full_versioned_files = (
            repo._format.supports_full_versioned_files)
        dummy_b._serializer = repo._serializer
        dummy_b._format.supports_tree_reference = (
            repo._format.supports_tree_reference)
        dummy_b._format.rich_root_data = repo._format.rich_root_data
        dummy_b._format.supports_full_versioned_files = (
            repo._format.supports_full_versioned_files)
        repository.InterRepository.register_optimiser(InterDummy)
        # we should get the default for something InterDummy returns False

        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit1()
        tree = self.make_branch_and_tree('.', format)
        tree.commit("Dull commit", rev_id=b"dull")
        revision_tree = tree.branch.repository.revision_tree(b'dull')
        with revision_tree.lock_read():
            self.assertRaises(
                errors.NoSuchFile, revision_tree.get_file_lines, u'')
        format = bzrdir.BzrDirMetaFormat1()
        format.repository_format = knitrepo.RepositoryFormatKnit3()
        upgrade.Convert('.', format)
        tree = workingtree.WorkingTree.open('.')
        revision_tree = tree.branch.repository.revision_tree(b'dull')
        with revision_tree.lock_read():
            revision_tree.get_file_lines(u'')
        tree.commit("Another dull commit", rev_id=b'dull2')
        revision_tree = tree.branch.repository.revision_tree(b'dull2')
        revision_tree.lock_read()
        self.addCleanup(revision_tree.unlock)
        self.assertEqual(b'dull', revision_tree.get_file_revision(u''))
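
    # Knit1 does not version the tree root, so fetching the root's text
    # raises NoSuchFile; after upgrade.Convert to Knit3 (rich roots) the
    # root is a versioned file and the same lookup succeeds. The converter
    # backfills the root text under the original revision id, which is why
    # even in b'dull2' the root's last-modified revision is still b'dull'.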

    def test_supports_external_lookups(self):
        format = bzrdir.BzrDirMetaFormat1()

        builder.start_series()
        builder.build_snapshot(None, [
            ('add', ('', b'root-id', 'directory', '')),
            ('add', ('file', b'file-id', 'file', b'content\n'))],
            revision_id=b'1')
        builder.build_snapshot([b'1'], [
            ('modify', ('file', b'content-2\n'))],
            revision_id=b'2')

        builder.start_series()
        builder.build_snapshot(None, [
            ('add', ('', b'root-id', 'directory', '')),
            ('add', ('file', b'file-id', 'file', b'content\n'))],
            revision_id=b'1')
        builder.build_snapshot([b'1'], [
            ('modify', ('file', b'content-2\n'))],
            revision_id=b'2')

        # On a regular pass, getting the inventories and chk pages for rev-2
        # would only get the newly created chk pages
        search = vf_search.SearchResult({b'rev-2'}, {b'rev-1'}, 1,
                                        {b'rev-2'})
        simple_chk_records = set()
        for vf_name, substream in source.get_stream(search):
            if vf_name == 'chk_bytes':
                for record in substream:
                    simple_chk_records.add(record.key)
            else:
                for _ in substream:
                    continue
        # 3 pages, the root (InternalNode), + 2 pages which actually changed
        self.assertEqual({(b'sha1:91481f539e802c76542ea5e4c83ad416bf219f73',),
                          (b'sha1:4ff91971043668583985aec83f4f0ab10a907d3f',),
                          (b'sha1:81e7324507c5ca132eedaf2d8414ee4bb2226187',),
                          (b'sha1:b101b7da280596c71a4540e9a1eeba8045985ee0',)},
                         set(simple_chk_records))
        # Now, when we do a similar call using 'get_stream_for_missing_keys'
        # we should get a much larger set of pages.
        missing = [('inventories', b'rev-2')]
        full_chk_records = set()
        for vf_name, substream in source.get_stream_for_missing_keys(missing):
            if vf_name == 'inventories':
                for record in substream:
                    self.assertEqual((b'rev-2',), record.key)
            elif vf_name == 'chk_bytes':
                for record in substream:
                    full_chk_records.add(record.key)
            else:
                self.fail('Should not be getting a stream of %s' % (vf_name,))
        # We have 257 records now. This is because we have 1 root page, and 256

        source = self.make_repository('source', format='pack-0.92')
        target = self.make_repository('target', format='pack-0.92')
        stream_source = source._get_source(target._format)
        self.assertIsInstance(
            stream_source, knitpack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_rich_root_pack(self):
        source = self.make_repository('source', format='rich-root-pack')
        target = self.make_repository('target', format='rich-root-pack')
        stream_source = source._get_source(target._format)
        self.assertIsInstance(
            stream_source, knitpack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_19(self):
        source = self.make_repository('source', format='1.9')
        target = self.make_repository('target', format='1.9')
        stream_source = source._get_source(target._format)
        self.assertIsInstance(
            stream_source, knitpack_repo.KnitPackStreamSource)

    def test_source_to_exact_pack_19_rich_root(self):
        source = self.make_repository('source', format='1.9-rich-root')
        target = self.make_repository('target', format='1.9-rich-root')
        stream_source = source._get_source(target._format)
        self.assertIsInstance(
            stream_source, knitpack_repo.KnitPackStreamSource)
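
    # Each exact-match pairing above selects the optimised
    # KnitPackStreamSource; the next test checks what happens when the
    # matching target lives behind a smart server instead.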

    def test_source_to_remote_exact_pack_19(self):
        trans = self.make_smart_server('target')

        super(TestDevelopment6FindParentIdsOfRevisions, self).setUp()
        self.builder = self.make_branch_builder('source')
        self.builder.start_series()
        self.builder.build_snapshot(
            None,
            [('add', ('', b'tree-root', 'directory', None))],
            revision_id=b'initial')
        self.repo = self.builder.get_branch().repository
        self.addCleanup(self.builder.finish_series)

    def assertParentIds(self, expected_result, rev_set):
        self.assertEqual(
            sorted(expected_result),
            sorted(self.repo._find_parent_ids_of_revisions(rev_set)))

    def test_simple(self):
        self.builder.build_snapshot(None, [], revision_id=b'revid1')
        self.builder.build_snapshot([b'revid1'], [], revision_id=b'revid2')
        rev_set = [b'revid2']
        self.assertParentIds([b'revid1'], rev_set)

    def test_not_first_parent(self):
        self.builder.build_snapshot(None, [], revision_id=b'revid1')
        self.builder.build_snapshot([b'revid1'], [], revision_id=b'revid2')
        self.builder.build_snapshot([b'revid2'], [], revision_id=b'revid3')
        rev_set = [b'revid3', b'revid2']
        self.assertParentIds([b'revid1'], rev_set)

    def test_not_null(self):
        rev_set = [b'initial']
        self.assertParentIds([], rev_set)

    def test_not_null_set(self):

    def test_ghost(self):
        self.builder.build_snapshot(None, [], revision_id=b'revid1')
        rev_set = [b'ghost', b'revid1']
        self.assertParentIds([b'initial'], rev_set)

    def test_ghost_parent(self):
        self.builder.build_snapshot(None, [], revision_id=b'revid1')
        self.builder.build_snapshot(
            [b'revid1', b'ghost'], [], revision_id=b'revid2')
        rev_set = [b'revid2', b'revid1']
        self.assertParentIds([b'ghost', b'initial'], rev_set)

    def test_righthand_parent(self):
        self.builder.build_snapshot(None, [], revision_id=b'revid1')
        self.builder.build_snapshot([b'revid1'], [], revision_id=b'revid2a')
        self.builder.build_snapshot([b'revid1'], [], revision_id=b'revid2b')
        self.builder.build_snapshot([b'revid2a', b'revid2b'], [],
                                    revision_id=b'revid3')
        rev_set = [b'revid3', b'revid2a']
        self.assertParentIds([b'revid1', b'revid2b'], rev_set)
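
    # Taken together: _find_parent_ids_of_revisions returns the parents
    # that lie just outside rev_set, and ghosts count as parents, which is
    # why test_ghost_parent expects b'ghost' in its result.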

class TestWithBrokenRepo(TestCaseWithTransport):

        cleanups.append(repo.commit_write_group)
        # make rev1a: A well-formed revision, containing 'file1'
        inv = inventory.Inventory(revision_id=b'rev1a')
        inv.root.revision = b'rev1a'
        self.add_file(repo, inv, 'file1', b'rev1a', [])
        repo.texts.add_lines((inv.root.file_id, b'rev1a'), [], [])
        repo.add_inventory(b'rev1a', inv, [])
        revision = _mod_revision.Revision(
            b'rev1a',
            committer='jrandom@example.com', timestamp=0,
            inventory_sha1='', timezone=0, message='foo', parent_ids=[])
        repo.add_revision(b'rev1a', revision, inv)

        # make rev1b, which has no Revision, but has an Inventory, and
        inv = inventory.Inventory(revision_id=b'rev1b')
        inv.root.revision = b'rev1b'
        self.add_file(repo, inv, 'file1', b'rev1b', [])
        repo.add_inventory(b'rev1b', inv, [])

        # make rev2, with file1 and file2
        # file1 has 'rev1b' as an ancestor, even though this is not
        # mentioned by 'rev1a', making it an unreferenced ancestor
        inv = inventory.Inventory()
        self.add_file(repo, inv, 'file1', b'rev2', [b'rev1a', b'rev1b'])
        self.add_file(repo, inv, 'file2', b'rev2', [])
        self.add_revision(repo, b'rev2', inv, [b'rev1a'])

        # make ghost revision rev1c
        inv = inventory.Inventory()
        self.add_file(repo, inv, 'file2', b'rev1c', [])

        # make rev3 with file2
        # file2 refers to 'rev1c', which is a ghost in this repository, so
        # file2 cannot have rev1c as its ancestor.
        inv = inventory.Inventory()
        self.add_file(repo, inv, 'file2', b'rev3', [b'rev1c'])
        self.add_revision(repo, b'rev3', inv, [b'rev1c'])

        for cleanup in reversed(cleanups):
            cleanup()

        inv.root.revision = revision_id
        repo.texts.add_lines((inv.root.file_id, revision_id), [], [])
        repo.add_inventory(revision_id, inv, parent_ids)
        revision = _mod_revision.Revision(
            revision_id,
            committer='jrandom@example.com', timestamp=0, inventory_sha1='',
            timezone=0, message='foo', parent_ids=parent_ids)
        repo.add_revision(revision_id, revision, inv)

    def add_file(self, repo, inv, filename, revision, parents):
        file_id = filename.encode('utf-8') + b'-id'
        content = [b'line\n']
        entry = inventory.InventoryFile(file_id, filename, b'TREE_ROOT')
        entry.revision = revision
        entry.text_sha1 = osutils.sha_strings(content)
        entry.text_size = 0
        inv.add(entry)
        text_key = (file_id, revision)
        parent_keys = [(file_id, parent) for parent in parents]
        repo.texts.add_lines(text_key, parent_keys, content)

    def test_insert_from_broken_repo(self):
        """Inserting a data stream from a broken repository won't silently

    def test__clear_obsolete_packs(self):
        packs = self.get_packs()
        obsolete_pack_trans = packs.transport.clone('obsolete_packs')
        obsolete_pack_trans.put_bytes('a-pack.pack', b'content\n')
        obsolete_pack_trans.put_bytes('a-pack.rix', b'content\n')
        obsolete_pack_trans.put_bytes('a-pack.iix', b'content\n')
        obsolete_pack_trans.put_bytes('another-pack.pack', b'foo\n')
        obsolete_pack_trans.put_bytes('not-a-pack.rix', b'foo\n')
        res = packs._clear_obsolete_packs()
        self.assertEqual(['a-pack', 'another-pack'], sorted(res))
        self.assertEqual([], obsolete_pack_trans.list_dir('.'))

    def test__clear_obsolete_packs_preserve(self):
        packs = self.get_packs()
        obsolete_pack_trans = packs.transport.clone('obsolete_packs')
        obsolete_pack_trans.put_bytes('a-pack.pack', b'content\n')
        obsolete_pack_trans.put_bytes('a-pack.rix', b'content\n')
        obsolete_pack_trans.put_bytes('a-pack.iix', b'content\n')
        obsolete_pack_trans.put_bytes('another-pack.pack', b'foo\n')
        obsolete_pack_trans.put_bytes('not-a-pack.rix', b'foo\n')
        res = packs._clear_obsolete_packs(preserve={'a-pack'})
        self.assertEqual(['a-pack', 'another-pack'], sorted(res))
        self.assertEqual(['a-pack.iix', 'a-pack.pack', 'a-pack.rix'],
                         sorted(obsolete_pack_trans.list_dir('.')))
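
    # _clear_obsolete_packs reports every obsolete pack it processed by
    # name ('not-a-pack' has no .pack file, so it is never counted); with
    # preserve= the named pack's files stay on disk even though it is
    # still reported in the result.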

    def test_pack_distribution_one_to_nine(self):
        packs = self.get_packs()
        self.assertEqual([1],
                         packs.pack_distribution(1))
        self.assertEqual([1, 1],
                         packs.pack_distribution(2))
        self.assertEqual([1, 1, 1],
                         packs.pack_distribution(3))
        self.assertEqual([1, 1, 1, 1],
                         packs.pack_distribution(4))
        self.assertEqual([1, 1, 1, 1, 1],
                         packs.pack_distribution(5))
        self.assertEqual([1, 1, 1, 1, 1, 1],
                         packs.pack_distribution(6))
        self.assertEqual([1, 1, 1, 1, 1, 1, 1],
                         packs.pack_distribution(7))
        self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1],
                         packs.pack_distribution(8))
        self.assertEqual([1, 1, 1, 1, 1, 1, 1, 1, 1],
                         packs.pack_distribution(9))

    def test_pack_distribution_stable_at_boundaries(self):
        """When there are multi-rev packs the counts are stable."""

        inv_index = GraphIndex(packs._index_transport, name + '.iix', sizes[1])
        txt_index = GraphIndex(packs._index_transport, name + '.tix', sizes[2])
        sig_index = GraphIndex(packs._index_transport, name + '.six', sizes[3])
        self.assertEqual(
            pack_repo.ExistingPack(
                packs._pack_transport, name, rev_index, inv_index, txt_index,
                sig_index), pack_1)
        # and the same instance should be returned on successive calls.
        self.assertTrue(pack_1 is packs.get_pack_by_name(name))

        packs._remove_pack_from_memory(removed_pack)
        names = packs.names()
        all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names()
        new_names = {x[0] for x in new_nodes}
        self.assertEqual(names, sorted([x[0] for x in all_nodes]))
        self.assertEqual(set(names) - set(orig_names), new_names)
        self.assertEqual({new_pack.name}, new_names)
        self.assertEqual([to_remove_name],
                         sorted([x[0] for x in deleted_nodes]))
        packs.reload_pack_names()
        reloaded_names = packs.names()
        self.assertEqual(orig_at_load, packs._packs_at_load)
        self.assertEqual(names, reloaded_names)
        all_nodes, deleted_nodes, new_nodes, _ = packs._diff_pack_names()
        new_names = {x[0] for x in new_nodes}
        self.assertEqual(names, sorted([x[0] for x in all_nodes]))
        self.assertEqual(set(names) - set(orig_names), new_names)
        self.assertEqual({new_pack.name}, new_names)
        self.assertEqual([to_remove_name],
                         sorted([x[0] for x in deleted_nodes]))

    def test_autopack_obsoletes_new_pack(self):
        tree, r, packs, revs = self.make_packs_and_alt_repo(write_lock=True)

        builder = self.make_branch_builder('.', format="1.9")
        builder.start_series()
        builder.build_snapshot(None, [
            ('add', ('', b'root-id', 'directory', None)),
            ('add', ('f', b'f-id', 'file', b'content\n'))],
            revision_id=b'A')
        builder.build_snapshot([b'A'],
                               [('modify', ('f', b'new-content\n'))],
                               revision_id=b'B')
        builder.build_snapshot([b'B'],
                               [('modify', ('f', b'third-content\n'))],
                               revision_id=b'C')
        builder.build_snapshot([b'C'],
                               [('modify', ('f', b'fourth-content\n'))],
                               revision_id=b'D')
        b = builder.get_branch()
        builder.finish_series()

        builder = self.make_branch_builder('source')
        builder.start_series()
        builder.build_snapshot(None, [
            ('add', ('', b'root-id', 'directory', None)),
            ('add', ('file', b'file-id', 'file', b'content\n')),
            ], revision_id=b'A')
        builder.build_snapshot([b'A'], [
            ('add', ('dir', b'dir-id', 'directory', None))],
            revision_id=b'B')
        builder.build_snapshot([b'B'], [
            ('modify', ('file', b'new content\n'))],
            revision_id=b'C')
        builder.finish_series()
        return builder.get_branch()

            pack_name_with_rev_C_content)

        b_source = self.make_abc_branch()
        b_base = b_source.controldir.sprout(
            'base', revision_id=b'A').open_branch()
        b_stacked = b_base.controldir.sprout(
            'stacked', stacked=True).open_branch()
        b_stacked.lock_write()
        self.addCleanup(b_stacked.unlock)
        b_stacked.fetch(b_source, b'B')
        # Now re-open the stacked repo directly (no fallbacks) so that we can
        # fill in the A rev.
        repo_not_stacked = b_stacked.controldir.open_repository()
        repo_not_stacked.lock_write()
        self.addCleanup(repo_not_stacked.unlock)
        # Now we should have a pack file with A's inventory, but not its
        # revision
        self.assertEqual([(b'A',), (b'B',)],
                         sorted(repo_not_stacked.inventories.keys()))
        self.assertEqual([(b'B',)],
                         sorted(repo_not_stacked.revisions.keys()))
        stacked_pack_names = repo_not_stacked._pack_collection.names()
        # We have a couple names here, figure out which has A's inventory
        for name in stacked_pack_names:
            pack = repo_not_stacked._pack_collection.get_pack_by_name(name)
            keys = [n[1] for n in pack.inventory_index.iter_all_entries()]
            if (b'A',) in keys:
                inv_a_pack_name = name
                break
        else:
            self.fail('Could not find pack containing A\'s inventory')
        repo_not_stacked.fetch(b_source.repository, b'A')
        self.assertEqual([(b'A',), (b'B',)],
                         sorted(repo_not_stacked.revisions.keys()))
        new_pack_names = set(repo_not_stacked._pack_collection.names())
        rev_a_pack_names = new_pack_names.difference(stacked_pack_names)
        self.assertEqual(1, len(rev_a_pack_names))
        rev_a_pack_name = list(rev_a_pack_names)[0]
        # Now fetch 'C', so we have a couple pack files to join
        repo_not_stacked.fetch(b_source.repository, b'C')
        rev_c_pack_names = set(repo_not_stacked._pack_collection.names())
        rev_c_pack_names = rev_c_pack_names.difference(new_pack_names)
        self.assertEqual(1, len(rev_c_pack_names))
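
        # Summary of the stacking behaviour exercised here: fetching b'B'
        # into the stacked branch also copies the parent inventory for
        # b'A' (presumably so deltas against the basis stay computable)
        # but not A's revision object, and each later fetch lands in its
        # own new pack file until a pack/autopack joins them.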