            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))
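
    # Illustrative sketch (not part of bzrlib): external_refs are keys whose
    # delta bases live outside this pack. iter_entries() returns only the
    # keys that the collection-wide combined index actually knows about, so
    # the set difference above is exactly the dangling references, e.g.:
    #
    #   external_refs = set([('file-id', 'rev-1')])
    #   present = set(k for (idx, k, v, r)
    #                 in index.iter_entries(external_refs))
    #   missing = external_refs - present   # non-empty => BzrCheckError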

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)],
                unlimited_cache=unlimited_cache))
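
    # A hedged note on the design: once a pack is finished, its in-memory
    # writable index builder is swapped for a read-only on-disk GraphIndex.
    # 'chk' indices get unlimited_cache=True on the assumption that CHK
    # pages are hit in effectively random order, so a bounded page cache
    # would thrash. (Rationale inferred from this code, not stated
    # elsewhere in this excerpt.)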


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)


class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'
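
    # Sketch of the layout this rename dance assumes (inferred from the
    # relative paths above, not documented in this excerpt):
    #
    #   upload/NAME.pack  -> packs/NAME.pack      (the pack itself)
    #   upload/NAME.rix   -> indices/NAME.rix     (one rename per index)
    #
    # i.e. a resumed pack lives in the upload directory until finish(),
    # at which point it becomes indistinguishable from an ExistingPack.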

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this index,
        which are not contained *in* this index. They may be present elsewhere.
        """
        return index.external_references(1)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        'chk': ('.cix', 4),
        }
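
    # Illustrative example (not part of the class): for a pack whose
    # content hash is 'beefcafe', the definitions above yield
    #   index_name('revision', 'beefcafe') -> 'beefcafe.rix'
    #   index_offset('text')               -> 2
    # so index_sizes[2] always holds the size of the text index.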

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count() or
            (self.chk_index is not None and self.chk_index.key_count()))

    def finish_content(self):
        if self.name is not None:
            return
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()

    def finish(self, suspend=False):
        """Finish the new pack.

        This:
         - finalises the content
         - writes out the associated indices
         - renames the pack into place.
         - stores the index size tuple for the pack in the index_sizes
           attribute.
        """
        self.finish_content()
        if not suspend:
            self._check_references()
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller. On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision', suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
384
self.write_stream.close()
500
385
# Note that this will clobber an existing pack with the same name,
501
386
# without checking for hash collisions. While this is undesirable this
508
393
# - try for HASH.pack
509
394
# - try for temporary-name
510
395
# - refresh the pack-list to see if the pack is now absent
511
new_name = self.name + '.pack'
513
new_name = '../packs/' + new_name
514
self.upload_transport.rename(self.random_name, new_name)
396
self.upload_transport.rename(self.random_name,
397
'../packs/' + self.name + '.pack')
515
398
self._state = 'finished'
516
399
if 'pack' in debug.debug_flags:
517
400
# XXX: size might be interesting?
518
mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
401
mutter('%s: create_pack: pack renamed into place: %s%s->%s%s t+%6.3fs',
519
402
time.ctime(), self.upload_transport.base, self.random_name,
520
new_name, time.time() - self.start_time)
403
self.pack_transport, self.name,
404
time.time() - self.start_time)
523
407
"""Flush any current data."""
527
411
self._hash.update(bytes)
528
412
self._buffer[:] = [[], 0]
530
def _get_external_refs(self, index):
531
return index._external_references()
414
def index_name(self, index_type, name):
415
"""Get the disk name of an index type for pack name 'name'."""
416
return name + NewPack.index_definitions[index_type][0]
418
def index_offset(self, index_type):
419
"""Get the position in a index_size array for a given index type."""
420
return NewPack.index_definitions[index_type][1]

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label, suspend=False):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index to write to.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        if suspend:
            transport = self.upload_transport
        else:
            transport = self.index_transport
        self.index_sizes[self.index_offset(index_type)] = transport.put_file(
            index_name, index.finish(), mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)
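
    # Hedged note: _write_index is the point where a suspended write group
    # diverges from a normal commit. With suspend=True the index is written
    # to the upload transport (next to the still-unfinished pack) so that a
    # later ResumedPack can stat() and reopen it; otherwise it goes straight
    # to the indices directory.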


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection."""

    def __init__(self, reload_func=None, flush_func=None):
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        self.add_callback = None

    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        This should probably not be used eventually, rather incremental add and
        removal of indices. It has been added during refactoring of existing
        code.

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        # any.
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index, pack.name)
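
    # Illustrative consequence (not stated elsewhere in this excerpt):
    # because new indices go in at position 0, lookups through
    # combined_index probe the most recently added pack first, so newly
    # committed data tends to be found on the first probe.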

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time. Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" %
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        del self.combined_index._index_names[:]
        self.add_callback = None

    def remove_index(self, index):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        """
        del self.index_to_pack[index]
        pos = self.combined_index._indices.index(index)
        del self.combined_index._indices[pos]
        del self.combined_index._index_names[pos]
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))


class Packer(object):
    """Create a pack from packs."""

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
            upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack

    def _update_pack_order(self, entries, index_to_pack_map):
        """Determine how we want our packs to be ordered."""
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                new_pack.signature_index.key_count(),
                time.time() - new_pack.start_time)
        # NB XXX: how to check CHK references are present? perhaps by yielding
        # the items? How should that interact with stacked repos?
        if new_pack.chk_index is not None:
            self._copy_chks()
            if 'pack' in debug.debug_flags:
                mutter('%s: create_pack: chk content copied: %s%s %d items t+%6.3fs',
                    time.ctime(), self._pack_collection._upload_transport.base,
                    new_pack.random_name,
                    new_pack.chk_index.key_count(),
                    time.time() - new_pack.start_time)
        new_pack._check_references()
        if not self._use_pack(new_pack):
            new_pack.abort()
            return None
        self.pb.update("Finishing pack", 5)
        new_pack.finish()
        self._pack_collection.allocate(new_pack)
        return new_pack

    def _copy_chks(self, refs=None):
        # XXX: Todo, recursive follow-pointers facility when fetching some
        # revisions only.
        chk_index_map, chk_indices = self._pack_map_and_index_list(
            'chk_index')
        chk_nodes = self._index_contents(chk_indices, refs)
        new_refs = set()
        # TODO: This isn't strictly tasteful as we are accessing some private
        # variables (_serializer). Perhaps a better way would be to have
        # Repository._deserialise_chk_node()
        search_key_func = chk_map.search_key_registry.get(
            self._pack_collection.repo._serializer.search_key_name)
        def accumulate_refs(lines):
            # XXX: move to a generic location
            bytes = ''.join(lines)
            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
            new_refs.update(node.refs())
        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
            self.new_pack.chk_index, output_lines=accumulate_refs)
        return new_refs
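
    # Hedged note: CHK pages reference further pages by content hash, so
    # copying one batch of nodes surfaces the keys of the next layer. The
    # refs accumulated via output_lines are returned so a caller can keep
    # calling _copy_chks until no new references remain.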

    def _copy_nodes(self, nodes, index_map, writer, write_index,
        output_lines=None):
        """Copy knit nodes between packs with no graph references.

        :param output_lines: Output full texts of copied items.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb, output_lines=output_lines)
        finally:
            pb.finished()

    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
        output_lines=None):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = NewPack
    resumed_pack_factory = ResumedPack

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are
            written until they finish.
        :param pack_transport: Addresses the directory of existing complete
            packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
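
        # Hedged note on the sibling wiring above: each CombinedGraphIndex
        # remembers which pack satisfied a recent lookup and shares that
        # ordering hint with its siblings, so e.g. a revision-index hit in
        # pack X moves pack X to the front of the text/inventory/signature
        # search order as well. (Behaviour inferred from the call; the
        # implementation lives in bzrlib.index.)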
        # resumed packs
        self._resumed_packs = []

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has."""
            self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
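
    # Hedged reading of the predicate above: a collection with a single pack
    # is normally "already packed", but formats whose packer also recompresses
    # content (pack_compresses) can still gain from repacking a single pack,
    # so for them this always answers False.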

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)
        if clean_obsolete_packs:
            self._clear_obsolete_packs()

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation."""
        rev_index = self._make_index(name, '.rix')
        inv_index = self._make_index(name, '.iix')
        txt_index = self._make_index(name, '.tix')
        sig_index = self._make_index(name, '.six')
        if self.chk_index is not None:
            chk_index = self._make_index(name, '.cix', unlimited_cache=True)
        else:
            chk_index = None
        result = ExistingPack(self._pack_transport, name, rev_index,
            inv_index, txt_index, sig_index, chk_index)
        self.add_pack_to_memory(result)
        return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             unlimited_cache=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs."""

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index."""
        return self._index_class(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        return self._index_class(transport, index_name, index_size,
                                 unlimited_cache=unlimited_cache)
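
    # Hedged note: resumed indices are not in self._names yet (the pack-names
    # file only lists committed packs), so their size has to be discovered by
    # stat()ing the upload directory instead of being read from _names.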

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        :param packs: The packs to obsolete.
        :param return: None.
        """
        for pack in packs:
            try:
                pack.pack_transport.rename(pack.file_name(),
                    '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.rename(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack."""

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            # move the baseline forward
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]
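
    # Hedged note on concurrency: _diff_pack_names() three-way merges the
    # in-memory pack list against what 'pack-names' now says on disk, so two
    # processes saving concurrently converge rather than clobbering each
    # other; lock_names() only protects the read-modify-write of the file
    # itself.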

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # This is functionally similar to _save_pack_names, but we don't write
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone. If they're not there we shouldn't fail in this
            # case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found. If the list is
            empty no problems were found.
        """
        # The base implementation does no checks. GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        if _format.supports_chks:
            # No graph, no compression:- references from chks are between
            # different objects not temporal versions of the same; and without
            # some sort of temporal structure knit compression will just fail.
            self.chk_bytes = KnitVersionedFiles(
                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
                    add_callback=self._pack_collection.chk_index.add_callback,
                    deltas=False, parents=False, is_locked=self.is_locked),
                data_access=self._pack_collection.chk_index.data_access,
                max_delta_chain=0)
        else:
            self.chk_bytes = None
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _warn_if_deprecated(self, branch=None):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            super(KnitPackRepository, self)._warn_if_deprecated(branch)

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _get_source(self, to_format):
        if to_format.network_name() == self._format.network_name():
            return KnitPackStreamSource(self, to_format)
        return super(KnitPackRepository, self)._get_source(to_format)

    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: an iterator yielding tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the
            # location in each index of the revision text so we can perform
            # linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,
                            rev_parents))
        finally:
            pb.finished()
        return result

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        """Lock the repository for writes.

        :return: A bzrlib.repository.RepositoryWriteLockResult.
        """
        locked = self.is_locked()
        if not self._write_lock_count and locked:
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return RepositoryWriteLockResult(self.unlock, None)

    def lock_read(self):
        """Lock the repository for reads.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()
        return LogicalLockResult(self.unlock)

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)
            transaction = self._transaction
            self._transaction = None
            transaction.finish()
        else:
            self.control_files.unlock()
        if not self.is_locked():
            for repo in self._fallback_repositories:
                repo.unlock()


class KnitPackStreamSource(StreamSource):
    """A StreamSource used to transfer data between same-format KnitPack repos.

    This source assumes:
        1) Same serialization format for all objects
        2) Same root information
        3) XML format inventories
        4) Atomic inserts (so we can stream inventory texts before text
           content)
        5) No chk_bytes
    """

    def __init__(self, from_repository, to_format):
        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
        self._text_keys = None
        self._text_fetch_order = 'unordered'

    def _get_filtered_inv_stream(self, revision_ids):
        from_repo = self.from_repository
        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
        parent_keys = [(p,) for p in parent_ids]
        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
        parent_text_keys = set(find_text_keys(
            from_repo._inventory_xml_lines_for_keys(parent_keys)))
        content_text_keys = set()
        knit = KnitVersionedFiles(None, None)
        factory = KnitPlainFactory()
        def find_text_keys_from_content(record):
            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
                raise ValueError("Unknown content storage kind for"
                    " inventory text: %s" % (record.storage_kind,))
            # It's a knit record, it has a _raw_record field (even if it was
            # reconstituted from a network stream).
            raw_data = record._raw_record
            # read the entire thing
            revision_id = record.key[-1]
            content, _ = knit._parse_record(revision_id, raw_data)
            if record.storage_kind == 'knit-delta-gz':
                line_iterator = factory.get_linedelta_content(content)
            elif record.storage_kind == 'knit-ft-gz':
                line_iterator = factory.get_fulltext_content(content)
            content_text_keys.update(find_text_keys(
                [(line, revision_id) for line in line_iterator]))
        revision_keys = [(r,) for r in revision_ids]
        def _filtered_inv_stream():
            source_vf = from_repo.inventories
            stream = source_vf.get_record_stream(revision_keys,
                                                 'unordered', False)
            for record in stream:
                if record.storage_kind == 'absent':
                    raise errors.NoSuchRevision(from_repo, record.key)
                find_text_keys_from_content(record)
                yield record
            self._text_keys = content_text_keys - parent_text_keys
        return ('inventories', _filtered_inv_stream())

    def _get_text_stream(self):
        # Note: We know we don't have to handle adding root keys, because both
        # the source and target are the identical network name.
        text_stream = self.from_repository.texts.get_record_stream(
            self._text_keys, self._text_fetch_order, False)
        return ('texts', text_stream)

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_filtered_inv_stream(revision_ids)
        yield self._get_text_stream()


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories."""
        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return repository

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
            repository at a slightly different url
            than normal. I.e. during 'upgrade'.
        """

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    This is pack-1.6.1 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development2')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1 with B+Trees.\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    1.6.1-subtree[as it might have been] with B+Tree indices.

    This is [now] retained until we have a CHK based subtree format in
    development.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 2 with subtree support "
            "(needs bzr.dev from before 1.8)\n")