bzr branch
http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
1 |
# Copyright (C) 2008 Canonical Ltd
|
2 |
#
|
|
3 |
# This program is free software; you can redistribute it and/or modify
|
|
4 |
# it under the terms of the GNU General Public License as published by
|
|
5 |
# the Free Software Foundation; either version 2 of the License, or
|
|
6 |
# (at your option) any later version.
|
|
7 |
#
|
|
8 |
# This program is distributed in the hope that it will be useful,
|
|
9 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
10 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
11 |
# GNU General Public License for more details.
|
|
12 |
#
|
|
13 |
# You should have received a copy of the GNU General Public License
|
|
14 |
# along with this program; if not, write to the Free Software
|
|
15 |
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
16 |
||
17 |
"""Tests for pack repositories.
|
|
18 |
||
19 |
These tests are repeated for all pack-based repository formats.
|
|
20 |
"""
|
|
21 |
||
|
3582.3.4
by Martin Pool
Use cStringIO rather than StringIO |
22 |
from cStringIO import StringIO |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
23 |
from stat import S_ISDIR |
24 |
||
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
25 |
from bzrlib.btree_index import BTreeGraphIndex |
26 |
from bzrlib.index import GraphIndex |
|
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
27 |
from bzrlib import ( |
28 |
bzrdir, |
|
29 |
errors, |
|
30 |
inventory, |
|
31 |
progress, |
|
32 |
repository, |
|
33 |
revision as _mod_revision, |
|
34 |
symbol_versioning, |
|
35 |
tests, |
|
36 |
ui, |
|
37 |
upgrade, |
|
38 |
workingtree, |
|
39 |
)
|
|
|
3801.1.18
by Andrew Bennetts
Add a test that ensures that the autopack RPC is actually used for all pack formats. |
40 |
from bzrlib.smart import ( |
41 |
client, |
|
42 |
server, |
|
43 |
)
|
|
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
44 |
from bzrlib.tests import ( |
45 |
TestCase, |
|
46 |
TestCaseWithTransport, |
|
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
47 |
TestNotApplicable, |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
48 |
TestSkipped, |
49 |
)
|
|
50 |
from bzrlib.transport import ( |
|
51 |
fakenfs, |
|
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
52 |
memory, |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
53 |
get_transport, |
54 |
)
|
|
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
55 |
from bzrlib.tests.per_repository import TestCaseWithRepository |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
56 |
|
57 |
||
58 |
class TestPackRepository(TestCaseWithTransport): |
|
59 |
"""Tests to be repeated across all pack-based formats. |
|
60 |
||
61 |
The following are populated from the test scenario:
|
|
62 |
||
63 |
:ivar format_name: Registered name fo the format to test.
|
|
64 |
:ivar format_string: On-disk format marker.
|
|
65 |
:ivar format_supports_external_lookups: Boolean.
|
|
66 |
"""
|
|
67 |
||
68 |
def get_format(self): |
|
69 |
return bzrdir.format_registry.make_bzrdir(self.format_name) |
|
70 |
||
71 |
def test_attribute__fetch_order(self): |
|
|
3606.7.3
by John Arbash Meinel
We don't have to fetch in topological order, as long as we fix all of the delta logic pieces. |
72 |
"""Packs do not need ordered data retrieval.""" |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
73 |
format = self.get_format() |
74 |
repo = self.make_repository('.', format=format) |
|
|
3606.7.8
by John Arbash Meinel
Switch names to 'unordered' that I missed before. |
75 |
self.assertEqual('unordered', repo._fetch_order) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
76 |
|
77 |
def test_attribute__fetch_uses_deltas(self): |
|
78 |
"""Packs reuse deltas.""" |
|
79 |
format = self.get_format() |
|
80 |
repo = self.make_repository('.', format=format) |
|
81 |
self.assertEqual(True, repo._fetch_uses_deltas) |
|
82 |
||
83 |
def test_disk_layout(self): |
|
84 |
format = self.get_format() |
|
85 |
repo = self.make_repository('.', format=format) |
|
86 |
# in case of side effects of locking.
|
|
87 |
repo.lock_write() |
|
88 |
repo.unlock() |
|
89 |
t = repo.bzrdir.get_repository_transport(None) |
|
90 |
self.check_format(t) |
|
91 |
# XXX: no locks left when unlocked at the moment
|
|
92 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
93 |
self.check_databases(t) |
|
94 |
||
95 |
def check_format(self, t): |
|
96 |
self.assertEqualDiff( |
|
97 |
self.format_string, # from scenario |
|
98 |
t.get('format').read()) |
|
99 |
||
100 |
def assertHasNoKndx(self, t, knit_name): |
|
101 |
"""Assert that knit_name has no index on t.""" |
|
102 |
self.assertFalse(t.has(knit_name + '.kndx')) |
|
103 |
||
104 |
def assertHasNoKnit(self, t, knit_name): |
|
105 |
"""Assert that knit_name exists on t.""" |
|
106 |
# no default content
|
|
107 |
self.assertFalse(t.has(knit_name + '.knit')) |
|
108 |
||
109 |
def check_databases(self, t): |
|
110 |
"""check knit content for a repository.""" |
|
111 |
# check conversion worked
|
|
112 |
self.assertHasNoKndx(t, 'inventory') |
|
113 |
self.assertHasNoKnit(t, 'inventory') |
|
114 |
self.assertHasNoKndx(t, 'revisions') |
|
115 |
self.assertHasNoKnit(t, 'revisions') |
|
116 |
self.assertHasNoKndx(t, 'signatures') |
|
117 |
self.assertHasNoKnit(t, 'signatures') |
|
118 |
self.assertFalse(t.has('knits')) |
|
119 |
# revision-indexes file-container directory
|
|
120 |
self.assertEqual([], |
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
121 |
list(self.index_class(t, 'pack-names', None).iter_all_entries())) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
122 |
self.assertTrue(S_ISDIR(t.stat('packs').st_mode)) |
123 |
self.assertTrue(S_ISDIR(t.stat('upload').st_mode)) |
|
124 |
self.assertTrue(S_ISDIR(t.stat('indices').st_mode)) |
|
125 |
self.assertTrue(S_ISDIR(t.stat('obsolete_packs').st_mode)) |
|
126 |
||
127 |
def test_shared_disk_layout(self): |
|
128 |
format = self.get_format() |
|
129 |
repo = self.make_repository('.', shared=True, format=format) |
|
130 |
# we want:
|
|
131 |
t = repo.bzrdir.get_repository_transport(None) |
|
132 |
self.check_format(t) |
|
133 |
# XXX: no locks left when unlocked at the moment
|
|
134 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
135 |
# We should have a 'shared-storage' marker file.
|
|
136 |
self.assertEqualDiff('', t.get('shared-storage').read()) |
|
137 |
self.check_databases(t) |
|
138 |
||
139 |
def test_shared_no_tree_disk_layout(self): |
|
140 |
format = self.get_format() |
|
141 |
repo = self.make_repository('.', shared=True, format=format) |
|
142 |
repo.set_make_working_trees(False) |
|
143 |
# we want:
|
|
144 |
t = repo.bzrdir.get_repository_transport(None) |
|
145 |
self.check_format(t) |
|
146 |
# XXX: no locks left when unlocked at the moment
|
|
147 |
# self.assertEqualDiff('', t.get('lock').read())
|
|
148 |
# We should have a 'shared-storage' marker file.
|
|
149 |
self.assertEqualDiff('', t.get('shared-storage').read()) |
|
150 |
# We should have a marker for the no-working-trees flag.
|
|
151 |
self.assertEqualDiff('', t.get('no-working-trees').read()) |
|
152 |
# The marker should go when we toggle the setting.
|
|
153 |
repo.set_make_working_trees(True) |
|
154 |
self.assertFalse(t.has('no-working-trees')) |
|
155 |
self.check_databases(t) |
|
156 |
||
157 |
def test_adding_revision_creates_pack_indices(self): |
|
158 |
format = self.get_format() |
|
159 |
tree = self.make_branch_and_tree('.', format=format) |
|
160 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
161 |
self.assertEqual([], |
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
162 |
list(self.index_class(trans, 'pack-names', None).iter_all_entries())) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
163 |
tree.commit('foobarbaz') |
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
164 |
index = self.index_class(trans, 'pack-names', None) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
165 |
index_nodes = list(index.iter_all_entries()) |
166 |
self.assertEqual(1, len(index_nodes)) |
|
167 |
node = index_nodes[0] |
|
168 |
name = node[1][0] |
|
169 |
# the pack sizes should be listed in the index
|
|
170 |
pack_value = node[2] |
|
171 |
sizes = [int(digits) for digits in pack_value.split(' ')] |
|
172 |
for size, suffix in zip(sizes, ['.rix', '.iix', '.tix', '.six']): |
|
173 |
stat = trans.stat('indices/%s%s' % (name, suffix)) |
|
174 |
self.assertEqual(size, stat.st_size) |
|
175 |
||
176 |
def test_pulling_nothing_leads_to_no_new_names(self): |
|
177 |
format = self.get_format() |
|
178 |
tree1 = self.make_branch_and_tree('1', format=format) |
|
179 |
tree2 = self.make_branch_and_tree('2', format=format) |
|
180 |
tree1.branch.repository.fetch(tree2.branch.repository) |
|
181 |
trans = tree1.branch.repository.bzrdir.get_repository_transport(None) |
|
182 |
self.assertEqual([], |
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
183 |
list(self.index_class(trans, 'pack-names', None).iter_all_entries())) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
184 |
|
185 |
def test_commit_across_pack_shape_boundary_autopacks(self): |
|
186 |
format = self.get_format() |
|
187 |
tree = self.make_branch_and_tree('.', format=format) |
|
188 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
189 |
# This test could be a little cheaper by replacing the packs
|
|
190 |
# attribute on the repository to allow a different pack distribution
|
|
191 |
# and max packs policy - so we are checking the policy is honoured
|
|
192 |
# in the test. But for now 11 commits is not a big deal in a single
|
|
193 |
# test.
|
|
194 |
for x in range(9): |
|
195 |
tree.commit('commit %s' % x) |
|
196 |
# there should be 9 packs:
|
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
197 |
index = self.index_class(trans, 'pack-names', None) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
198 |
self.assertEqual(9, len(list(index.iter_all_entries()))) |
199 |
# insert some files in obsolete_packs which should be removed by pack.
|
|
200 |
trans.put_bytes('obsolete_packs/foo', '123') |
|
201 |
trans.put_bytes('obsolete_packs/bar', '321') |
|
202 |
# committing one more should coalesce to 1 of 10.
|
|
203 |
tree.commit('commit triggering pack') |
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
204 |
index = self.index_class(trans, 'pack-names', None) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
205 |
self.assertEqual(1, len(list(index.iter_all_entries()))) |
206 |
# packing should not damage data
|
|
207 |
tree = tree.bzrdir.open_workingtree() |
|
208 |
check_result = tree.branch.repository.check( |
|
209 |
[tree.branch.last_revision()]) |
|
210 |
# We should have 50 (10x5) files in the obsolete_packs directory.
|
|
211 |
obsolete_files = list(trans.list_dir('obsolete_packs')) |
|
212 |
self.assertFalse('foo' in obsolete_files) |
|
213 |
self.assertFalse('bar' in obsolete_files) |
|
214 |
self.assertEqual(50, len(obsolete_files)) |
|
215 |
# XXX: Todo check packs obsoleted correctly - old packs and indices
|
|
216 |
# in the obsolete_packs directory.
|
|
217 |
large_pack_name = list(index.iter_all_entries())[0][1][0] |
|
218 |
# finally, committing again should not touch the large pack.
|
|
219 |
tree.commit('commit not triggering pack') |
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
220 |
index = self.index_class(trans, 'pack-names', None) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
221 |
self.assertEqual(2, len(list(index.iter_all_entries()))) |
222 |
pack_names = [node[1][0] for node in index.iter_all_entries()] |
|
223 |
self.assertTrue(large_pack_name in pack_names) |
|
224 |
||
225 |
def test_fail_obsolete_deletion(self): |
|
226 |
# failing to delete obsolete packs is not fatal
|
|
227 |
format = self.get_format() |
|
228 |
server = fakenfs.FakeNFSServer() |
|
229 |
server.setUp() |
|
230 |
self.addCleanup(server.tearDown) |
|
231 |
transport = get_transport(server.get_url()) |
|
232 |
bzrdir = self.get_format().initialize_on_transport(transport) |
|
233 |
repo = bzrdir.create_repository() |
|
234 |
repo_transport = bzrdir.get_repository_transport(None) |
|
235 |
self.assertTrue(repo_transport.has('obsolete_packs')) |
|
236 |
# these files are in use by another client and typically can't be deleted
|
|
237 |
repo_transport.put_bytes('obsolete_packs/.nfsblahblah', 'contents') |
|
238 |
repo._pack_collection._clear_obsolete_packs() |
|
239 |
self.assertTrue(repo_transport.has('obsolete_packs/.nfsblahblah')) |
|
240 |
||
241 |
def test_pack_after_two_commits_packs_everything(self): |
|
242 |
format = self.get_format() |
|
243 |
tree = self.make_branch_and_tree('.', format=format) |
|
244 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
245 |
tree.commit('start') |
|
246 |
tree.commit('more work') |
|
247 |
tree.branch.repository.pack() |
|
248 |
# there should be 1 pack:
|
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
249 |
index = self.index_class(trans, 'pack-names', None) |
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
250 |
self.assertEqual(1, len(list(index.iter_all_entries()))) |
251 |
self.assertEqual(2, len(tree.branch.repository.all_revision_ids())) |
|
252 |
||
253 |
def test_pack_layout(self): |
|
254 |
format = self.get_format() |
|
255 |
tree = self.make_branch_and_tree('.', format=format) |
|
256 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
257 |
tree.commit('start', rev_id='1') |
|
258 |
tree.commit('more work', rev_id='2') |
|
259 |
tree.branch.repository.pack() |
|
260 |
tree.lock_read() |
|
261 |
self.addCleanup(tree.unlock) |
|
262 |
pack = tree.branch.repository._pack_collection.get_pack_by_name( |
|
263 |
tree.branch.repository._pack_collection.names()[0]) |
|
264 |
# revision access tends to be tip->ancestor, so ordering that way on
|
|
265 |
# disk is a good idea.
|
|
266 |
for _1, key, val, refs in pack.revision_index.iter_all_entries(): |
|
267 |
if key == ('1',): |
|
268 |
pos_1 = int(val[1:].split()[0]) |
|
269 |
else: |
|
270 |
pos_2 = int(val[1:].split()[0]) |
|
271 |
self.assertTrue(pos_2 < pos_1) |
|
272 |
||
273 |
def test_pack_repositories_support_multiple_write_locks(self): |
|
274 |
format = self.get_format() |
|
275 |
self.make_repository('.', shared=True, format=format) |
|
276 |
r1 = repository.Repository.open('.') |
|
277 |
r2 = repository.Repository.open('.') |
|
278 |
r1.lock_write() |
|
279 |
self.addCleanup(r1.unlock) |
|
280 |
r2.lock_write() |
|
281 |
r2.unlock() |
|
282 |
||
283 |
def _add_text(self, repo, fileid): |
|
284 |
"""Add a text to the repository within a write group.""" |
|
285 |
repo.texts.add_lines((fileid, 'samplerev+'+fileid), [], []) |
|
286 |
||
287 |
def test_concurrent_writers_merge_new_packs(self): |
|
288 |
format = self.get_format() |
|
289 |
self.make_repository('.', shared=True, format=format) |
|
290 |
r1 = repository.Repository.open('.') |
|
291 |
r2 = repository.Repository.open('.') |
|
292 |
r1.lock_write() |
|
293 |
try: |
|
294 |
# access enough data to load the names list
|
|
295 |
list(r1.all_revision_ids()) |
|
296 |
r2.lock_write() |
|
297 |
try: |
|
298 |
# access enough data to load the names list
|
|
299 |
list(r2.all_revision_ids()) |
|
300 |
r1.start_write_group() |
|
301 |
try: |
|
302 |
r2.start_write_group() |
|
303 |
try: |
|
304 |
self._add_text(r1, 'fileidr1') |
|
305 |
self._add_text(r2, 'fileidr2') |
|
306 |
except: |
|
307 |
r2.abort_write_group() |
|
308 |
raise
|
|
309 |
except: |
|
310 |
r1.abort_write_group() |
|
311 |
raise
|
|
312 |
# both r1 and r2 have open write groups with data in them
|
|
313 |
# created while the other's write group was open.
|
|
314 |
# Commit both which requires a merge to the pack-names.
|
|
315 |
try: |
|
316 |
r1.commit_write_group() |
|
317 |
except: |
|
318 |
r1.abort_write_group() |
|
319 |
r2.abort_write_group() |
|
320 |
raise
|
|
321 |
r2.commit_write_group() |
|
322 |
# tell r1 to reload from disk
|
|
323 |
r1._pack_collection.reset() |
|
324 |
# Now both repositories should know about both names
|
|
325 |
r1._pack_collection.ensure_loaded() |
|
326 |
r2._pack_collection.ensure_loaded() |
|
327 |
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names()) |
|
328 |
self.assertEqual(2, len(r1._pack_collection.names())) |
|
329 |
finally: |
|
330 |
r2.unlock() |
|
331 |
finally: |
|
332 |
r1.unlock() |
|
333 |
||
334 |
def test_concurrent_writer_second_preserves_dropping_a_pack(self): |
|
335 |
format = self.get_format() |
|
336 |
self.make_repository('.', shared=True, format=format) |
|
337 |
r1 = repository.Repository.open('.') |
|
338 |
r2 = repository.Repository.open('.') |
|
339 |
# add a pack to drop
|
|
340 |
r1.lock_write() |
|
341 |
try: |
|
342 |
r1.start_write_group() |
|
343 |
try: |
|
344 |
self._add_text(r1, 'fileidr1') |
|
345 |
except: |
|
346 |
r1.abort_write_group() |
|
347 |
raise
|
|
348 |
else: |
|
349 |
r1.commit_write_group() |
|
350 |
r1._pack_collection.ensure_loaded() |
|
351 |
name_to_drop = r1._pack_collection.all_packs()[0].name |
|
352 |
finally: |
|
353 |
r1.unlock() |
|
354 |
r1.lock_write() |
|
355 |
try: |
|
356 |
# access enough data to load the names list
|
|
357 |
list(r1.all_revision_ids()) |
|
358 |
r2.lock_write() |
|
359 |
try: |
|
360 |
# access enough data to load the names list
|
|
361 |
list(r2.all_revision_ids()) |
|
362 |
r1._pack_collection.ensure_loaded() |
|
363 |
try: |
|
364 |
r2.start_write_group() |
|
365 |
try: |
|
366 |
# in r1, drop the pack
|
|
367 |
r1._pack_collection._remove_pack_from_memory( |
|
368 |
r1._pack_collection.get_pack_by_name(name_to_drop)) |
|
369 |
# in r2, add a pack
|
|
370 |
self._add_text(r2, 'fileidr2') |
|
371 |
except: |
|
372 |
r2.abort_write_group() |
|
373 |
raise
|
|
374 |
except: |
|
375 |
r1._pack_collection.reset() |
|
376 |
raise
|
|
377 |
# r1 has a changed names list, and r2 an open write groups with
|
|
378 |
# changes.
|
|
379 |
# save r1, and then commit the r2 write group, which requires a
|
|
380 |
# merge to the pack-names, which should not reinstate
|
|
381 |
# name_to_drop
|
|
382 |
try: |
|
383 |
r1._pack_collection._save_pack_names() |
|
384 |
r1._pack_collection.reset() |
|
385 |
except: |
|
386 |
r2.abort_write_group() |
|
387 |
raise
|
|
388 |
try: |
|
389 |
r2.commit_write_group() |
|
390 |
except: |
|
391 |
r2.abort_write_group() |
|
392 |
raise
|
|
393 |
# Now both repositories should now about just one name.
|
|
394 |
r1._pack_collection.ensure_loaded() |
|
395 |
r2._pack_collection.ensure_loaded() |
|
396 |
self.assertEqual(r1._pack_collection.names(), r2._pack_collection.names()) |
|
397 |
self.assertEqual(1, len(r1._pack_collection.names())) |
|
398 |
self.assertFalse(name_to_drop in r1._pack_collection.names()) |
|
399 |
finally: |
|
400 |
r2.unlock() |
|
401 |
finally: |
|
402 |
r1.unlock() |
|
403 |
||
|
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
404 |
def test_concurrent_pack_triggers_reload(self): |
405 |
# create 2 packs, which we will then collapse
|
|
406 |
tree = self.make_branch_and_tree('tree') |
|
|
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
407 |
tree.lock_write() |
|
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
408 |
try: |
|
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
409 |
rev1 = tree.commit('one') |
410 |
rev2 = tree.commit('two') |
|
411 |
r2 = repository.Repository.open('tree') |
|
|
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
412 |
r2.lock_read() |
413 |
try: |
|
414 |
# Now r2 has read the pack-names file, but will need to reload
|
|
415 |
# it after r1 has repacked
|
|
|
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
416 |
tree.branch.repository.pack() |
417 |
self.assertEqual({rev2:(rev1,)}, r2.get_parent_map([rev2])) |
|
|
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
418 |
finally: |
419 |
r2.unlock() |
|
420 |
finally: |
|
|
3789.1.2
by John Arbash Meinel
Add RepositoryPackCollection.reload_pack_names() |
421 |
tree.unlock() |
|
3789.1.1
by John Arbash Meinel
add the failing acceptance test for the first portion. |
422 |
|
|
3789.2.8
by John Arbash Meinel
Add a test that KnitPackRepository.get_record_stream retries when appropriate. |
423 |
def test_concurrent_pack_during_get_record_reloads(self): |
424 |
tree = self.make_branch_and_tree('tree') |
|
425 |
tree.lock_write() |
|
426 |
try: |
|
427 |
rev1 = tree.commit('one') |
|
428 |
rev2 = tree.commit('two') |
|
|
3789.2.14
by John Arbash Meinel
Update AggregateIndex to pass the reload_func into _DirectPackAccess |
429 |
keys = [(rev1,), (rev2,)] |
|
3789.2.8
by John Arbash Meinel
Add a test that KnitPackRepository.get_record_stream retries when appropriate. |
430 |
r2 = repository.Repository.open('tree') |
431 |
r2.lock_read() |
|
432 |
try: |
|
433 |
# At this point, we will start grabbing a record stream, and
|
|
434 |
# trigger a repack mid-way
|
|
435 |
packed = False |
|
436 |
result = {} |
|
437 |
record_stream = r2.revisions.get_record_stream(keys, |
|
438 |
'unordered', False) |
|
439 |
for record in record_stream: |
|
440 |
result[record.key] = record |
|
441 |
if not packed: |
|
442 |
tree.branch.repository.pack() |
|
443 |
packed = True |
|
444 |
# The first record will be found in the original location, but
|
|
445 |
# after the pack, we have to reload to find the next record
|
|
|
3789.2.14
by John Arbash Meinel
Update AggregateIndex to pass the reload_func into _DirectPackAccess |
446 |
self.assertEqual(sorted(keys), sorted(result.keys())) |
|
3789.2.8
by John Arbash Meinel
Add a test that KnitPackRepository.get_record_stream retries when appropriate. |
447 |
finally: |
448 |
r2.unlock() |
|
449 |
finally: |
|
450 |
tree.unlock() |
|
451 |
||
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
452 |
def test_lock_write_does_not_physically_lock(self): |
453 |
repo = self.make_repository('.', format=self.get_format()) |
|
454 |
repo.lock_write() |
|
455 |
self.addCleanup(repo.unlock) |
|
456 |
self.assertFalse(repo.get_physical_lock_status()) |
|
457 |
||
458 |
def prepare_for_break_lock(self): |
|
459 |
# Setup the global ui factory state so that a break-lock method call
|
|
460 |
# will find usable input in the input stream.
|
|
461 |
old_factory = ui.ui_factory |
|
462 |
def restoreFactory(): |
|
463 |
ui.ui_factory = old_factory |
|
464 |
self.addCleanup(restoreFactory) |
|
465 |
ui.ui_factory = ui.SilentUIFactory() |
|
466 |
ui.ui_factory.stdin = StringIO("y\n") |
|
467 |
||
468 |
def test_break_lock_breaks_physical_lock(self): |
|
469 |
repo = self.make_repository('.', format=self.get_format()) |
|
470 |
repo._pack_collection.lock_names() |
|
|
3650.4.1
by Aaron Bentley
Fix test kipple in test_break_lock_breaks_physical_lock |
471 |
repo.control_files.leave_in_place() |
472 |
repo.unlock() |
|
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
473 |
repo2 = repository.Repository.open('.') |
474 |
self.assertTrue(repo.get_physical_lock_status()) |
|
475 |
self.prepare_for_break_lock() |
|
476 |
repo2.break_lock() |
|
477 |
self.assertFalse(repo.get_physical_lock_status()) |
|
478 |
||
479 |
def test_broken_physical_locks_error_on__unlock_names_lock(self): |
|
480 |
repo = self.make_repository('.', format=self.get_format()) |
|
481 |
repo._pack_collection.lock_names() |
|
482 |
self.assertTrue(repo.get_physical_lock_status()) |
|
483 |
repo2 = repository.Repository.open('.') |
|
484 |
self.prepare_for_break_lock() |
|
485 |
repo2.break_lock() |
|
486 |
self.assertRaises(errors.LockBroken, repo._pack_collection._unlock_names) |
|
487 |
||
488 |
def test_fetch_without_find_ghosts_ignores_ghosts(self): |
|
489 |
# we want two repositories at this point:
|
|
490 |
# one with a revision that is a ghost in the other
|
|
491 |
# repository.
|
|
492 |
# 'ghost' is present in has_ghost, 'ghost' is absent in 'missing_ghost'.
|
|
493 |
# 'references' is present in both repositories, and 'tip' is present
|
|
494 |
# just in has_ghost.
|
|
495 |
# has_ghost missing_ghost
|
|
496 |
#------------------------------
|
|
497 |
# 'ghost' -
|
|
498 |
# 'references' 'references'
|
|
499 |
# 'tip' -
|
|
500 |
# In this test we fetch 'tip' which should not fetch 'ghost'
|
|
501 |
has_ghost = self.make_repository('has_ghost', format=self.get_format()) |
|
502 |
missing_ghost = self.make_repository('missing_ghost', |
|
503 |
format=self.get_format()) |
|
504 |
||
505 |
def add_commit(repo, revision_id, parent_ids): |
|
506 |
repo.lock_write() |
|
507 |
repo.start_write_group() |
|
508 |
inv = inventory.Inventory(revision_id=revision_id) |
|
509 |
inv.root.revision = revision_id |
|
510 |
root_id = inv.root.file_id |
|
511 |
sha1 = repo.add_inventory(revision_id, inv, []) |
|
512 |
repo.texts.add_lines((root_id, revision_id), [], []) |
|
513 |
rev = _mod_revision.Revision(timestamp=0, |
|
514 |
timezone=None, |
|
515 |
committer="Foo Bar <foo@example.com>", |
|
516 |
message="Message", |
|
517 |
inventory_sha1=sha1, |
|
518 |
revision_id=revision_id) |
|
519 |
rev.parent_ids = parent_ids |
|
520 |
repo.add_revision(revision_id, rev) |
|
521 |
repo.commit_write_group() |
|
522 |
repo.unlock() |
|
523 |
add_commit(has_ghost, 'ghost', []) |
|
524 |
add_commit(has_ghost, 'references', ['ghost']) |
|
525 |
add_commit(missing_ghost, 'references', ['ghost']) |
|
526 |
add_commit(has_ghost, 'tip', ['references']) |
|
527 |
missing_ghost.fetch(has_ghost, 'tip') |
|
528 |
# missing ghost now has tip and not ghost.
|
|
529 |
rev = missing_ghost.get_revision('tip') |
|
530 |
inv = missing_ghost.get_inventory('tip') |
|
531 |
self.assertRaises(errors.NoSuchRevision, |
|
532 |
missing_ghost.get_revision, 'ghost') |
|
533 |
self.assertRaises(errors.NoSuchRevision, |
|
534 |
missing_ghost.get_inventory, 'ghost') |
|
535 |
||
536 |
def test_supports_external_lookups(self): |
|
537 |
repo = self.make_repository('.', format=self.get_format()) |
|
538 |
self.assertEqual(self.format_supports_external_lookups, |
|
539 |
repo._format.supports_external_lookups) |
|
540 |
||
|
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
541 |
def test_abort_write_group_does_not_raise_when_suppressed(self): |
542 |
"""Similar to per_repository.test_write_group's test of the same name. |
|
543 |
||
544 |
Also requires that the exception is logged.
|
|
545 |
"""
|
|
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
546 |
self.vfs_transport_factory = memory.MemoryServer |
|
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
547 |
repo = self.make_repository('repo') |
548 |
token = repo.lock_write() |
|
549 |
self.addCleanup(repo.unlock) |
|
550 |
repo.start_write_group() |
|
551 |
# Damage the repository on the filesystem
|
|
552 |
self.get_transport('').rename('repo', 'foo') |
|
553 |
# abort_write_group will not raise an error
|
|
554 |
self.assertEqual(None, repo.abort_write_group(suppress_errors=True)) |
|
555 |
# But it does log an error
|
|
556 |
log_file = self._get_log(keep_log_file=True) |
|
557 |
self.assertContainsRe(log_file, 'abort_write_group failed') |
|
558 |
self.assertContainsRe(log_file, r'INFO bzr: ERROR \(ignored\):') |
|
559 |
if token is not None: |
|
560 |
repo.leave_lock_in_place() |
|
561 |
||
562 |
def test_abort_write_group_does_raise_when_not_suppressed(self): |
|
|
3825.4.2
by Andrew Bennetts
Run the abort_write_group tests against a memory transport to avoid platform-specific limits on changing files that may be in use. |
563 |
self.vfs_transport_factory = memory.MemoryServer |
|
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
564 |
repo = self.make_repository('repo') |
565 |
token = repo.lock_write() |
|
566 |
self.addCleanup(repo.unlock) |
|
567 |
repo.start_write_group() |
|
568 |
# Damage the repository on the filesystem
|
|
569 |
self.get_transport('').rename('repo', 'foo') |
|
570 |
# abort_write_group will not raise an error
|
|
571 |
self.assertRaises(Exception, repo.abort_write_group) |
|
572 |
if token is not None: |
|
573 |
repo.leave_lock_in_place() |
|
574 |
||
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
575 |
|
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
576 |
class TestPackRepositoryStacking(TestCaseWithTransport): |
577 |
||
578 |
"""Tests for stacking pack repositories""" |
|
579 |
||
580 |
def setUp(self): |
|
581 |
if not self.format_supports_external_lookups: |
|
582 |
raise TestNotApplicable("%r doesn't support stacking" |
|
583 |
% (self.format_name,)) |
|
584 |
super(TestPackRepositoryStacking, self).setUp() |
|
585 |
||
586 |
def get_format(self): |
|
587 |
return bzrdir.format_registry.make_bzrdir(self.format_name) |
|
588 |
||
|
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
589 |
def test_stack_checks_rich_root_compatibility(self): |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
590 |
# early versions of the packing code relied on pack internals to
|
591 |
# stack, but the current version should be able to stack on any
|
|
592 |
# format.
|
|
593 |
#
|
|
594 |
# TODO: Possibly this should be run per-repository-format and raise
|
|
595 |
# TestNotApplicable on formats that don't support stacking. -- mbp
|
|
596 |
# 20080729
|
|
597 |
repo = self.make_repository('repo', format=self.get_format()) |
|
598 |
if repo.supports_rich_root(): |
|
599 |
# can only stack on repositories that have compatible internal
|
|
600 |
# metadata
|
|
|
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
601 |
if getattr(repo._format, 'supports_tree_reference', False): |
602 |
matching_format_name = 'pack-0.92-subtree' |
|
603 |
else: |
|
604 |
matching_format_name = 'rich-root-pack' |
|
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
605 |
mismatching_format_name = 'pack-0.92' |
606 |
else: |
|
607 |
matching_format_name = 'pack-0.92' |
|
608 |
mismatching_format_name = 'pack-0.92-subtree' |
|
609 |
base = self.make_repository('base', format=matching_format_name) |
|
610 |
repo.add_fallback_repository(base) |
|
611 |
# you can't stack on something with incompatible data
|
|
612 |
bad_repo = self.make_repository('mismatch', |
|
613 |
format=mismatching_format_name) |
|
614 |
e = self.assertRaises(errors.IncompatibleRepositories, |
|
615 |
repo.add_fallback_repository, bad_repo) |
|
616 |
self.assertContainsRe(str(e), |
|
617 |
r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n' |
|
618 |
r'KnitPackRepository.*/repo/.*\n' |
|
619 |
r'different rich-root support') |
|
620 |
||
|
3606.10.5
by John Arbash Meinel
Switch out --1.6-rich-root for --1.6.1-rich-root. |
621 |
def test_stack_checks_serializers_compatibility(self): |
622 |
repo = self.make_repository('repo', format=self.get_format()) |
|
623 |
if getattr(repo._format, 'supports_tree_reference', False): |
|
624 |
# can only stack on repositories that have compatible internal
|
|
625 |
# metadata
|
|
626 |
matching_format_name = 'pack-0.92-subtree' |
|
627 |
mismatching_format_name = 'rich-root-pack' |
|
628 |
else: |
|
629 |
if repo.supports_rich_root(): |
|
630 |
matching_format_name = 'rich-root-pack' |
|
631 |
mismatching_format_name = 'pack-0.92-subtree' |
|
632 |
else: |
|
633 |
raise TestNotApplicable('No formats use non-v5 serializer' |
|
634 |
' without having rich-root also set') |
|
635 |
base = self.make_repository('base', format=matching_format_name) |
|
636 |
repo.add_fallback_repository(base) |
|
637 |
# you can't stack on something with incompatible data
|
|
638 |
bad_repo = self.make_repository('mismatch', |
|
639 |
format=mismatching_format_name) |
|
640 |
e = self.assertRaises(errors.IncompatibleRepositories, |
|
641 |
repo.add_fallback_repository, bad_repo) |
|
642 |
self.assertContainsRe(str(e), |
|
643 |
r'(?m)KnitPackRepository.*/mismatch/.*\nis not compatible with\n' |
|
644 |
r'KnitPackRepository.*/repo/.*\n' |
|
645 |
r'different serializers') |
|
646 |
||
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
647 |
def test_adding_pack_does_not_record_pack_names_from_other_repositories(self): |
648 |
base = self.make_branch_and_tree('base', format=self.get_format()) |
|
649 |
base.commit('foo') |
|
650 |
referencing = self.make_branch_and_tree('repo', format=self.get_format()) |
|
651 |
referencing.branch.repository.add_fallback_repository(base.branch.repository) |
|
652 |
referencing.commit('bar') |
|
653 |
new_instance = referencing.bzrdir.open_repository() |
|
654 |
new_instance.lock_read() |
|
655 |
self.addCleanup(new_instance.unlock) |
|
656 |
new_instance._pack_collection.ensure_loaded() |
|
657 |
self.assertEqual(1, len(new_instance._pack_collection.all_packs())) |
|
658 |
||
659 |
def test_autopack_only_considers_main_repo_packs(self): |
|
660 |
base = self.make_branch_and_tree('base', format=self.get_format()) |
|
661 |
base.commit('foo') |
|
662 |
tree = self.make_branch_and_tree('repo', format=self.get_format()) |
|
663 |
tree.branch.repository.add_fallback_repository(base.branch.repository) |
|
664 |
trans = tree.branch.repository.bzrdir.get_repository_transport(None) |
|
665 |
# This test could be a little cheaper by replacing the packs
|
|
666 |
# attribute on the repository to allow a different pack distribution
|
|
667 |
# and max packs policy - so we are checking the policy is honoured
|
|
668 |
# in the test. But for now 11 commits is not a big deal in a single
|
|
669 |
# test.
|
|
670 |
for x in range(9): |
|
671 |
tree.commit('commit %s' % x) |
|
672 |
# there should be 9 packs:
|
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
673 |
index = self.index_class(trans, 'pack-names', None) |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
674 |
self.assertEqual(9, len(list(index.iter_all_entries()))) |
675 |
# committing one more should coalesce to 1 of 10.
|
|
676 |
tree.commit('commit triggering pack') |
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
677 |
index = self.index_class(trans, 'pack-names', None) |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
678 |
self.assertEqual(1, len(list(index.iter_all_entries()))) |
679 |
# packing should not damage data
|
|
680 |
tree = tree.bzrdir.open_workingtree() |
|
681 |
check_result = tree.branch.repository.check( |
|
682 |
[tree.branch.last_revision()]) |
|
683 |
# We should have 50 (10x5) files in the obsolete_packs directory.
|
|
684 |
obsolete_files = list(trans.list_dir('obsolete_packs')) |
|
685 |
self.assertFalse('foo' in obsolete_files) |
|
686 |
self.assertFalse('bar' in obsolete_files) |
|
687 |
self.assertEqual(50, len(obsolete_files)) |
|
688 |
# XXX: Todo check packs obsoleted correctly - old packs and indices
|
|
689 |
# in the obsolete_packs directory.
|
|
690 |
large_pack_name = list(index.iter_all_entries())[0][1][0] |
|
691 |
# finally, committing again should not touch the large pack.
|
|
692 |
tree.commit('commit not triggering pack') |
|
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
693 |
index = self.index_class(trans, 'pack-names', None) |
|
3582.3.3
by Martin Pool
Reenable tests for stacking pack repositories |
694 |
self.assertEqual(2, len(list(index.iter_all_entries()))) |
695 |
pack_names = [node[1][0] for node in index.iter_all_entries()] |
|
696 |
self.assertTrue(large_pack_name in pack_names) |
|
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
697 |
|
698 |
||
|
3801.1.18
by Andrew Bennetts
Add a test that ensures that the autopack RPC is actually used for all pack formats. |
699 |
class TestSmartServerAutopack(TestCaseWithTransport):
    """Ensure autopacking over the smart protocol uses the autopack RPC.

    NOTE(review): ``self.format_name`` is not set here — it appears to be
    injected per-scenario by this module's load_tests (each scenario dict
    carries a ``format_name`` key); confirm against the scenario applier.
    """

    def setUp(self):
        super(TestSmartServerAutopack, self).setUp()
        # Create a smart server that publishes whatever the backing VFS server
        # does.
        self.smart_server = server.SmartTCPServer_for_testing()
        self.smart_server.setUp(self.get_server())
        self.addCleanup(self.smart_server.tearDown)
        # Log all HPSS calls into self.hpss_calls.
        client._SmartClient.hooks.install_named_hook(
            'call', self.capture_hpss_call, None)
        self.hpss_calls = []

    def capture_hpss_call(self, params):
        # Hook callback: record the method name of every smart-client call
        # so tests can assert which RPCs were issued.
        self.hpss_calls.append(params.method)

    def get_format(self):
        # Resolve the scenario's canned format name to a concrete bzrdir
        # format object.
        return bzrdir.format_registry.make_bzrdir(self.format_name)

    def test_autopack_rpc_is_used_when_using_hpss(self):
        # Make local and remote repos
        tree = self.make_branch_and_tree('local', format=self.get_format())
        self.make_branch_and_tree('remote', format=self.get_format())
        remote_branch_url = self.smart_server.get_url() + 'remote'
        remote_branch = bzrdir.BzrDir.open(remote_branch_url).open_branch()
        # Make 9 local revisions, and push them one at a time to the remote
        # repo to produce 9 pack files.
        for x in range(9):
            tree.commit('commit %s' % x)
            tree.branch.push(remote_branch)
        # Make one more push to trigger an autopack
        self.hpss_calls = []
        tree.commit('commit triggering pack')
        tree.branch.push(remote_branch)
        self.assertTrue('PackRepository.autopack' in self.hpss_calls)
|
735 |
||
736 |
||
|
3582.3.1
by Martin Pool
Split pack repository tests into their own file and use scenarios |
737 |
def load_tests(basic_tests, module, test_loader):
    """Multiply the basic tests across every pack-based repository format.

    Each scenario dict supplies the bzrdir canned format name, the
    repository's on-disk format string, whether the format supports
    external (stacking) lookups, and the pack-names index class.
    """
    format_scenarios = [
        dict(format_name='pack-0.92',
             format_string="Bazaar pack repository format 1 (needs bzr 0.92)\n",
             format_supports_external_lookups=False,
             index_class=GraphIndex),
        dict(format_name='pack-0.92-subtree',
             format_string="Bazaar pack repository format 1 "
             "with subtree support (needs bzr 0.92)\n",
             format_supports_external_lookups=False,
             index_class=GraphIndex),
        dict(format_name='1.6',
             format_string="Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.6.1-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack5RichRoot "
             "(bzr 1.6.1)\n",
             format_supports_external_lookups=True,
             index_class=GraphIndex),
        dict(format_name='1.9',
             format_string="Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='1.9-rich-root',
             format_string="Bazaar RepositoryFormatKnitPack6RichRoot "
             "(bzr 1.9)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='development2',
             format_string="Bazaar development format 2 "
             "(needs bzr.dev from before 1.8)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        dict(format_name='development2-subtree',
             format_string="Bazaar development format 2 "
             "with subtree support (needs bzr.dev from before 1.8)\n",
             format_supports_external_lookups=True,
             index_class=BTreeGraphIndex),
        ]
    adapter = tests.TestScenarioApplier()
    # Each scenario is named after its canned format name.
    adapter.scenarios = [
        (scenario['format_name'], scenario)
        for scenario in format_scenarios]
    suite = tests.TestSuite()
    tests.adapt_tests(basic_tests, adapter, suite)
    return suite