@@ -239,30 +239,37 @@
         self.assertTrue(large_pack_name in pack_names)
 
     def test_commit_write_group_returns_new_pack_names(self):
+        # This test doesn't need real disk.
+        self.vfs_transport_factory = tests.MemoryServer
         format = self.get_format()
-        tree = self.make_branch_and_tree('foo', format=format)
-        tree.commit('first post')
-        repo = tree.branch.repository
+        repo = self.make_repository('foo', format=format)
         repo.lock_write()
         try:
-            repo.start_write_group()
-            try:
-                inv = inventory.Inventory(revision_id="A")
-                inv.root.revision = "A"
-                repo.texts.add_lines((inv.root.file_id, "A"), [], [])
-                rev = _mod_revision.Revision(timestamp=0, timezone=None,
-                    committer="Foo Bar <foo@example.com>", message="Message",
-                    revision_id="A")
-                rev.parent_ids = ()
-                repo.add_revision("A", rev, inv=inv)
-            except:
-                repo.abort_write_group()
-                raise
-            else:
-                old_names = repo._pack_collection._names.keys()
-                result = repo.commit_write_group()
-                cur_names = repo._pack_collection._names.keys()
-                new_names = list(set(cur_names) - set(old_names))
-                self.assertEqual(new_names, result)
+            # All current pack repository styles autopack at 10 revisions; and
+            # autopack as well as regular commit write group needs to return
+            # the new pack name. Looping is a little ugly, but we don't have a
+            # clean way to test both the autopack logic and the normal code
+            # path without doing this loop.
+            for pos in range(10):
+                revid = str(pos)
+                repo.start_write_group()
+                try:
+                    inv = inventory.Inventory(revision_id=revid)
+                    inv.root.revision = revid
+                    repo.texts.add_lines((inv.root.file_id, revid), [], [])
+                    rev = _mod_revision.Revision(timestamp=0, timezone=None,
+                        committer="Foo Bar <foo@example.com>", message="Message",
+                        revision_id=revid)
+                    rev.parent_ids = ()
+                    repo.add_revision(revid, rev, inv=inv)
+                except:
+                    repo.abort_write_group()
+                    raise
+                else:
+                    old_names = repo._pack_collection._names.keys()
+                    result = repo.commit_write_group()
+                    cur_names = repo._pack_collection._names.keys()
+                    new_names = list(set(cur_names) - set(old_names))
+                    self.assertEqual(new_names, result)
         finally:
             repo.unlock()
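The assertion at the end of each loop iteration is a set difference over the pack-name map before and after the commit: whatever commit_write_group() returns must be exactly the names that newly appeared in _names. A standalone illustration of that invariant, with invented pack names:

    # Invented names standing in for repo._pack_collection._names.
    old_names = {'pack-1': None, 'pack-2': None}

    # Simulate commit_write_group() flushing one new pack to disk and
    # returning its name.
    cur_names = dict(old_names)
    cur_names['pack-3'] = None
    result = ['pack-3']

    # The tested invariant: the return value lists exactly the new names.
    new_names = list(set(cur_names) - set(old_names))
    assert new_names == result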
@@ -271,8 +278,7 @@
         # failing to delete obsolete packs is not fatal
         format = self.get_format()
         server = fakenfs.FakeNFSServer()
-        server.setUp()
-        self.addCleanup(server.tearDown)
+        self.start_server(server)
         transport = get_transport(server.get_url())
         bzrdir = self.get_format().initialize_on_transport(transport)
         repo = bzrdir.create_repository()
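start_server is the test-base helper that replaces the explicit setUp/addCleanup(tearDown) pairs being deleted in this patch. A plausible sketch of such a helper, assuming the test servers expose start_server/stop_server methods and that an optional backing server can be passed through (the two-argument form appears in the smart-server hunk further down):

    # Sketch only, not bzrlib's exact implementation: start the given
    # transport server and guarantee it is stopped when the test ends.
    def start_server(self, transport_server, backing_server=None):
        if backing_server is None:
            transport_server.start_server()
        else:
            transport_server.start_server(backing_server)
        self.addCleanup(transport_server.stop_server)

Centralising the cleanup registration this way means no caller can start a server and forget the matching teardown.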
@@ -965,7 +971,7 @@
             ('add', ('', 'root-id', 'directory', None))])
         builder.build_snapshot('B-id', ['A-id', 'ghost-id'], [])
         builder.finish_series()
-        repo = self.make_repository('target')
+        repo = self.make_repository('target', format=self.get_format())
         b = builder.get_branch()
         b.lock_read()
         self.addCleanup(b.unlock)
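Note that 'ghost-id' is never built by the builder, so 'B-id' ends up with a ghost parent: referenced, but absent from the repository. A hypothetical sanity check of that state (assuming the locked branch b from above; Repository.get_parent_map omits revisions that are not present):

    # Hypothetical check: 'ghost-id' is referenced as a parent of 'B-id'
    # but is not itself stored, i.e. it is a ghost.
    parent_map = b.repository.get_parent_map(['B-id', 'ghost-id'])
    assert 'ghost-id' in parent_map['B-id']
    assert 'ghost-id' not in parent_map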
@@ -1003,8 +1009,19 @@
         source_repo, target_repo = self.create_source_and_target()
         target_repo.start_write_group()
         try:
-            stream = source_repo.revisions.get_record_stream([('B-id',)],
-                'unordered', True)
+            # Copy all texts, inventories, and chks so that nothing is missing
+            # for revision B-id.
+            for vf_name in ['texts', 'chk_bytes', 'inventories']:
+                source_vf = getattr(source_repo, vf_name, None)
+                if source_vf is None:
+                    continue
+                target_vf = getattr(target_repo, vf_name)
+                stream = source_vf.get_record_stream(
+                    source_vf.keys(), 'unordered', True)
+                target_vf.insert_record_stream(stream)
+            # Copy just revision B-id
+            stream = source_repo.revisions.get_record_stream(
+                [('B-id',)], 'unordered', True)
             target_repo.revisions.insert_record_stream(stream)
             key_refs = target_repo.revisions._index._key_dependencies
             self.assertEqual([('B-id',)], sorted(key_refs.get_referrers()))
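The _key_dependencies structure consulted at the end tracks which inserted revisions refer to keys that may not be present yet, which lets a write group be checked for dangling references later. A generic sketch of that bookkeeping (not bzrlib's actual _KeyRefs class), matching the assertion above:

    class KeyRefs(object):
        # Maps referenced key -> set of keys referring to it.
        def __init__(self):
            self.refs = {}

        def add_references(self, key, parent_keys):
            for parent in parent_keys:
                self.refs.setdefault(parent, set()).add(key)

        def get_referrers(self):
            # Every key that refers to some other key.
            result = set()
            for referrers in self.refs.values():
                result.update(referrers)
            return result

    # After inserting only 'B-id' (whose parents include a ghost), the
    # sole referrer recorded is 'B-id' itself, as the test asserts.
    key_refs = KeyRefs()
    key_refs.add_references(('B-id',), [('A-id',), ('ghost-id',)])
    assert sorted(key_refs.get_referrers()) == [('B-id',)]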
@@ -1020,8 +1037,7 @@
         # Create a smart server that publishes whatever the backing VFS server
         # does.
         self.smart_server = server.SmartTCPServer_for_testing()
-        self.smart_server.setUp(self.get_server())
-        self.addCleanup(self.smart_server.tearDown)
+        self.start_server(self.smart_server, self.get_server())
         # Log all HPSS calls into self.hpss_calls.
         client._SmartClient.hooks.install_named_hook(
             'call', self.capture_hpss_call, None)
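capture_hpss_call is not shown in this hunk. Assuming the smart client's 'call' hook passes a params object exposing the request's method name, and that setUp also initialises self.hpss_calls to an empty list, a minimal recorder would be:

    # Minimal sketch of the hook target wired up above: record the HPSS
    # method of every client call so tests can assert on the RPC traffic.
    def capture_hpss_call(self, params):
        self.hpss_calls.append(params.method)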