            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()
    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))
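    # A worked sketch of the check above (illustrative only, not part of the
    # original source): if this pack holds a delta whose compression base
    # lives in another pack of the collection, the base's key appears in
    # external_refs and must be resolvable by the collection's combined
    # index:
    #
    #   external_refs = set([('file-id', 'rev-1')])
    #   found = set(k for (idx, k, v, r) in
    #       index.iter_entries(external_refs))
    #   missing = external_refs - found   # non-empty -> BzrCheckError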
    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)
    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)],
                unlimited_cache=unlimited_cache))
class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)
class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)
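    # Illustrative note (not part of the original source): a ResumedPack
    # starts in the 'resumed' state, where its .pack file still lives on the
    # upload transport; finish() below moves it into the pack store and flips
    # the state to 'finished', after which access_tuple() answers with the
    # regular pack transport:
    #
    #   pack._state == 'resumed'   -> (upload_transport, '<name>.pack')
    #   pack._state == 'finished'  -> (pack_transport, '<name>.pack')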
    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'
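    # Sketch of the on-disk effect of finish() above (an inference from the
    # relative paths in the code, not text from the original source): with a
    # pack named NAME, the renames move
    #
    #   upload/NAME.rix  -> indices/NAME.rix   (one rename per index type)
    #   upload/NAME.pack -> packs/NAME.pack
    #
    # after which each writable index is swapped for a readonly one via
    # _replace_index_with_readonly().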
    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this index,
        which are not contained *in* this index. They may be present elsewhere.
        """
        return index.external_references(1)
class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        'chk': ('.cix', 4),
        }

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            allocated.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        """

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)
    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count() or
            (self.chk_index is not None and self.chk_index.key_count()))

    def finish_content(self):
        if self.name is not None:
            return
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()
def finish(self, suspend=False):
353
self.signature_index.key_count())
468
356
"""Finish the new pack.
475
363
- stores the index size tuple for the pack in the index_sizes
478
self.finish_content()
480
self._check_references()
368
self._write_data('', flush=True)
369
self.name = self._hash.hexdigest()
370
self._check_references()
482
372
# XXX: It'd be better to write them all to temporary names, then
483
373
# rename them all into place, so that the window when only some are
484
374
# visible is smaller. On the other hand none will be seen until
485
375
# they're in the names list.
486
376
self.index_sizes = [None, None, None, None]
487
self._write_index('revision', self.revision_index, 'revision', suspend)
488
self._write_index('inventory', self.inventory_index, 'inventory',
490
self._write_index('text', self.text_index, 'file texts', suspend)
377
self._write_index('revision', self.revision_index, 'revision')
378
self._write_index('inventory', self.inventory_index, 'inventory')
379
self._write_index('text', self.text_index, 'file texts')
491
380
self._write_index('signature', self.signature_index,
492
'revision signatures', suspend)
493
if self.chk_index is not None:
494
self.index_sizes.append(None)
495
self._write_index('chk', self.chk_index,
496
'content hash bytes', suspend)
381
'revision signatures')
497
382
self.write_stream.close()
498
383
# Note that this will clobber an existing pack with the same name,
499
384
# without checking for hash collisions. While this is undesirable this
506
391
# - try for HASH.pack
507
392
# - try for temporary-name
508
393
# - refresh the pack-list to see if the pack is now absent
509
new_name = self.name + '.pack'
511
new_name = '../packs/' + new_name
512
self.upload_transport.rename(self.random_name, new_name)
394
self.upload_transport.rename(self.random_name,
395
'../packs/' + self.name + '.pack')
513
396
self._state = 'finished'
514
397
if 'pack' in debug.debug_flags:
515
398
# XXX: size might be interesting?
516
mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
399
mutter('%s: create_pack: pack renamed into place: %s%s->%s%s t+%6.3fs',
517
400
time.ctime(), self.upload_transport.base, self.random_name,
518
new_name, time.time() - self.start_time)
401
self.pack_transport, self.name,
402
time.time() - self.start_time)
    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def _get_external_refs(self, index):
        return index._external_references()
    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + NewPack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in a index_size array for a given index type."""
        return NewPack.index_definitions[index_type][1]
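    # Example of the two helpers above (illustrative only): the
    # index_definitions table drives both the on-disk suffix and the slot in
    # index_sizes, so for the 'text' index
    #
    #   >>> NewPack.index_definitions['text']
    #   ('.tix', 2)
    #
    # index_name('text', 'a1b2') returns 'a1b2.tix' and index_offset('text')
    # returns 2.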
    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label, suspend=False):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index to write.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        if suspend:
            transport = self.upload_transport
        else:
            transport = self.index_transport
        self.index_sizes[self.index_offset(index_type)] = transport.put_file(
            index_name, index.finish(), mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        self.add_callback = None
    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        This should probably not be used eventually, rather incremental add and
        removal of indices. It has been added during refactoring of existing
        code.

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        # there is one.
        self.add_callback = None
    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index, pack.name)
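    # Illustrative note (not part of the original source): because new
    # indices are inserted at position 0, the combined index probes the most
    # recently added pack first. Adding packs A then B gives a search order
    # of
    #
    #   combined_index._indices == [index_B, index_A]
    #
    # which favours recently written data in lookups.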
    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time.  Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" %
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        del self.combined_index._index_names[:]
        self.add_callback = None
    def remove_index(self, index):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        """
        del self.index_to_pack[index]
        pos = self.combined_index._indices.index(index)
        del self.combined_index._indices[pos]
        del self.combined_index._index_names[pos]
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))
    def _extra_init(self):
        """A template hook to allow extending the constructor trivially."""

    def _pack_map_and_index_list(self, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the preferred
            access order.
        """
        indices = []
        pack_map = {}
        for pack_obj in self.packs:
            index = getattr(pack_obj, index_attribute)
            indices.append(index)
            pack_map[index] = pack_obj
        return pack_map, indices

    def _index_contents(self, indices, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param indices: The list of indices to query
        :param key_filter: An optional filter to limit the keys returned.
        """
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)
    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
            upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack
    def _update_pack_order(self, entries, index_to_pack_map):
        """Determine how we want our packs to be ordered.

        This changes the sort order of the self.packs list so that packs unused
        by 'entries' will be at the end of the list, so that future requests
        can avoid probing them. Used packs will be at the front of the
        self.packs list, in the order of their first use in 'entries'.

        :param entries: A list of (index, ...) tuples
        :param index_to_pack_map: A mapping from index objects to pack objects.
        """
        packs = []
        seen_indexes = set()
        for entry in entries:
            index = entry[0]
            if index not in seen_indexes:
                packs.append(index_to_pack_map[index])
                seen_indexes.add(index)
        if len(packs) == len(self.packs):
            if 'pack' in debug.debug_flags:
                mutter('Not changing pack list, all packs used.')
            return
        seen_packs = set(packs)
        for pack in self.packs:
            if pack not in seen_packs:
                packs.append(pack)
                seen_packs.add(pack)
        if 'pack' in debug.debug_flags:
            old_names = [p.access_tuple()[1] for p in self.packs]
            new_names = [p.access_tuple()[1] for p in packs]
            mutter('Reordering packs\nfrom: %s\n to: %s',
                old_names, new_names)
        self.packs = packs
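    # Worked example for _update_pack_order (illustrative only): with
    # self.packs == [p1, p2, p3] and entries whose indices first touch p3 and
    # then p1, the reordered list is
    #
    #   [p3, p1, p2]
    #
    # used packs in first-use order, then the unused packs, so later requests
    # probe the unused ones last.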
    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""

        self._pack_collection.allocate(new_pack)
    def _copy_chks(self, refs=None):
        # XXX: Todo, recursive follow-pointers facility when fetching some
        # revisions only.
        chk_index_map, chk_indices = self._pack_map_and_index_list(
            'chk_index')
        chk_nodes = self._index_contents(chk_indices, refs)
        new_refs = set()
        # TODO: This isn't strictly tasteful as we are accessing some private
        #       variables (_serializer). Perhaps a better way would be to have
        #       Repository._deserialise_chk_node()
        search_key_func = chk_map.search_key_registry.get(
            self._pack_collection.repo._serializer.search_key_name)
        def accumlate_refs(lines):
            # XXX: move to a generic location
            bytes = ''.join(lines)
            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
            new_refs.update(node.refs())
        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
            self.new_pack.chk_index, output_lines=accumlate_refs)
        return new_refs
    def _copy_nodes(self, nodes, index_map, writer, write_index,
        output_lines=None):
        """Copy knit nodes between packs with no graph references.

        :param output_lines: Output full texts of copied items.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb, output_lines=output_lines)
        finally:
            pb.finished()
    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
        output_lines=None):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:

            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path,
                    [offset[0:2] for offset in pack_readv_requests])
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                if output_lines is not None:
                    output_lines(knit._parse_record(key[-1], raw_data)[0])
                else:
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1
        pb.update("Copied record", record_index, total_items)
        for index, readv_vector, node_vector in readv_group_iter:
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path, readv_vector)
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (key, eol_flag, references) in \
                izip(reader.iter_records(), node_vector):
                raw_data = read_func(None)
                if output_lines:
                    # read the entire thing
                    content, _ = knit._parse_record(key[-1], raw_data)
                    if len(references[-1]) == 0:
                        line_iterator = factory.get_fulltext_content(content)
                    else:
                        line_iterator = factory.get_linedelta_content(content)
                    for line in line_iterator:
                        yield line
                else:
                    # check the header only
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key,
                    eol_flag + "%d %d" % (pos, size), references)
                pb.update("Copied record", record_index)
                record_index += 1
    def _get_text_nodes(self):
        text_index_map, text_indices = self._pack_map_and_index_list(
            'text_index')
        return text_index_map, self._index_contents(text_indices,
            self._text_filter)
    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to perform
            readvs to obtain the data for nodes. Each item yielded by the
            iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack
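    # Illustrative sketch (not from the original source): grouping by pack
    # means that nodes [(idx_A, k1), (idx_B, k2), (idx_A, k3)] produce one
    # readv against pack A covering k1 and k3 and one readv against pack B
    # covering k2, rather than three separate requests.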
class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = NewPack
    resumed_pack_factory = ResumedPack

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            until all constituent index files have been written.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        # resumed packs
        self._resumed_packs = []

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)
    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.
        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: Something evaluating true if packing took place.
        """
        try:
            return self._do_autopack()
        except errors.RetryAutopack:
            # If we get a RetryAutopack exception, we should abort the
            # current action, and retry.
            return self.autopack()
    def _do_autopack(self):
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return None
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []

                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
                                      reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result
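    # Illustrative note (not part of the original source): autopack only does
    # work when the pack count exceeds _max_pack_count(total_revisions),
    # which - assuming the digit-sum scheme implemented below - is the sum of
    # the decimal digits of the revision count. For example a repository with
    # 2500 revisions tolerates at most 2 + 5 + 0 + 0 = 7 packs before
    # _do_autopack() starts combining them.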
    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = _packer_class(self, packs, '.autopack',
                                   reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result
    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()
    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)
        if clean_obsolete_packs:
            self._clear_obsolete_packs()
    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result
    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             unlimited_cache=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result
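    # Illustrative note (not part of the original source): write group tokens
    # are simply the names of suspended packs, which are md5 hex digests of
    # the pack content (hence the 32-hex-digit check above). Resuming with
    #
    #   collection._resume_pack('0123456789abcdef0123456789abcdef')
    #
    # rebuilds a ResumedPack from the indices still sitting on the upload
    # transport.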
    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs currently in use.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index."""
        return self._index_class(self.transport, 'pack-names', None
            ).iter_all_entries()
    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        return self._index_class(transport, index_name, index_size,
                                 unlimited_cache=unlimited_cache)
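    # Example (illustrative only): for a finished pack the index size comes
    # from the pack-names metadata via _suffix_offsets, e.g. a '.tix' suffix
    # maps to offset 2, so
    #
    #   index_size = self._names[name][2]
    #
    # while resume=True sizes the index by stat()ing the file still on the
    # upload transport.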
    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result
        :return: None.
        """
        for pack in packs:
            try:
                pack.pack_transport.rename(pack.file_name(),
                    '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.rename(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))
    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.
        self._remove_pack_indices(pack)
        self.packs.remove(pack)
    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise
    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        # cached chk data
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None
    def _make_index_map(self, index_suffix):
        """Return information on existing indices.

        :param suffix: Index suffix added to pack name.

        :returns: (pack_map, indices) where indices is a list of GraphIndex
            objects, and pack_map is a mapping from those objects to the
            pack tuple they describe.
        """
        # TODO: stop using this; it creates new indices unnecessarily.
        self.ensure_loaded()
        suffix_map = {'.rix': 'revision_index',
            '.six': 'signature_index',
            '.iix': 'inventory_index',
            '.tix': 'text_index',
            }
        return self._packs_list_to_pack_map_and_index_list(self.all_packs(),
            suffix_map[index_suffix])

    def _packs_list_to_pack_map_and_index_list(self, packs, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param packs: The packs list to process.
        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the same order
            as the packs list.
        """
        indices = []
        pack_map = {}
        for pack in packs:
            index = getattr(pack, index_attribute)
            indices.append(index)
            pack_map[index] = (pack.pack_transport, pack.file_name())
        return pack_map, indices

    def _index_contents(self, pack_map, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param pack_map: A map from indices to pack details.
        :param key_filter: An optional filter to limit the keys returned.
        """
        indices = [index for index in pack_map.iterkeys()]
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)
    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()
        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = set()
                if obsolete_packs:
                    to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]
    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not struck
        # with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # This is functionally similar to _save_pack_names, but we don't write
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False
    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())
    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found
    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone.  But if they're not there we shouldn't fail in
            # this case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]
    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found.  If the list is
            empty no problems were found.
        """
        # The base implementation does no checks.  GCRepositoryPackCollection
        # overrides this.
        return []
    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []
    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)
class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        if _format.supports_chks:
            # No graph, no compression:- references from chks are between
            # different objects not temporal versions of the same; and without
            # some sort of temporal structure knit compression will just fail.
            self.chk_bytes = KnitVersionedFiles(
                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
                    add_callback=self._pack_collection.chk_index.add_callback,
                    deltas=False, parents=False, is_locked=self.is_locked),
                data_access=self._pack_collection.chk_index.data_access,
                max_delta_chain=0)
        else:
            self.chk_bytes = None
2271
1815
# True when the repository object is 'write locked' (as opposed to the
2272
# physical lock only taken out around changes to the pack-names list.)
1816
# physical lock only taken out around changes to the pack-names list.)
2273
1817
# Another way to represent this would be a decorator around the control
2274
1818
# files object that presents logical locks as physical ones - if this
2275
1819
# gets ugly consider that alternative design. RBC 20071011
2279
1823
self._reconcile_does_inventory_gc = True
2280
1824
self._reconcile_fixes_text_parents = True
2281
1825
self._reconcile_backsup_inventory = False
1826
self._fetch_order = 'unordered'
2283
def _warn_if_deprecated(self, branch=None):
1828
def _warn_if_deprecated(self):
2284
1829
# This class isn't deprecated, but one sub-format is
2285
1830
if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
2286
super(KnitPackRepository, self)._warn_if_deprecated(branch)
1831
from bzrlib import repository
1832
if repository._deprecation_warning_done:
1834
repository._deprecation_warning_done = True
1835
warning("Format %s for %s is deprecated - please use"
1836
" 'bzr upgrade --1.6.1-rich-root'"
1837
% (self._format, self.bzrdir.transport.base))
    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _get_source(self, to_format):
        if to_format.network_name() == self._format.network_name():
            return KnitPackStreamSource(self, to_format)
        return super(KnitPackRepository, self)._get_source(to_format)
    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: an iterator yielding tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the location
            # in each index of the revision text so we can perform linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents, rev_parents))
        finally:
            pb.finished()
        return result

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]
    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()
    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)
    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_write_locked(self):
        return self._write_lock_count
    def lock_write(self, token=None):
        locked = self.is_locked()
        if not self._write_lock_count and locked:
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()

    def lock_read(self):
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()
    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)
1976
transaction = self._transaction
2414
1977
self._transaction = None
2415
1978
transaction.finish()
1979
for repo in self._fallback_repositories:
2417
1982
self.control_files.unlock()
2419
if not self.is_locked():
2420
1983
for repo in self._fallback_repositories:
2424
class KnitPackStreamSource(StreamSource):
2425
"""A StreamSource used to transfer data between same-format KnitPack repos.
2427
This source assumes:
2428
1) Same serialization format for all objects
2429
2) Same root information
2430
3) XML format inventories
2431
4) Atomic inserts (so we can stream inventory texts before text
2436
def __init__(self, from_repository, to_format):
2437
super(KnitPackStreamSource, self).__init__(from_repository, to_format)
2438
self._text_keys = None
2439
self._text_fetch_order = 'unordered'
2441
def _get_filtered_inv_stream(self, revision_ids):
2442
from_repo = self.from_repository
2443
parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
2444
parent_keys = [(p,) for p in parent_ids]
2445
find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
2446
parent_text_keys = set(find_text_keys(
2447
from_repo._inventory_xml_lines_for_keys(parent_keys)))
2448
content_text_keys = set()
2449
knit = KnitVersionedFiles(None, None)
2450
factory = KnitPlainFactory()
2451
def find_text_keys_from_content(record):
2452
if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
2453
raise ValueError("Unknown content storage kind for"
2454
" inventory text: %s" % (record.storage_kind,))
2455
# It's a knit record, it has a _raw_record field (even if it was
2456
# reconstituted from a network stream).
2457
raw_data = record._raw_record
2458
# read the entire thing
2459
revision_id = record.key[-1]
2460
content, _ = knit._parse_record(revision_id, raw_data)
2461
if record.storage_kind == 'knit-delta-gz':
2462
line_iterator = factory.get_linedelta_content(content)
2463
elif record.storage_kind == 'knit-ft-gz':
2464
line_iterator = factory.get_fulltext_content(content)
2465
content_text_keys.update(find_text_keys(
2466
[(line, revision_id) for line in line_iterator]))
2467
revision_keys = [(r,) for r in revision_ids]
2468
def _filtered_inv_stream():
2469
source_vf = from_repo.inventories
2470
stream = source_vf.get_record_stream(revision_keys,
2472
for record in stream:
2473
if record.storage_kind == 'absent':
2474
raise errors.NoSuchRevision(from_repo, record.key)
2475
find_text_keys_from_content(record)
2477
self._text_keys = content_text_keys - parent_text_keys
2478
return ('inventories', _filtered_inv_stream())
2480
def _get_text_stream(self):
2481
# Note: We know we don't have to handle adding root keys, because both
2482
# the source and target are the identical network name.
2483
text_stream = self.from_repository.texts.get_record_stream(
2484
self._text_keys, self._text_fetch_order, False)
2485
return ('texts', text_stream)
2487
def get_stream(self, search):
2488
revision_ids = search.get_keys()
2489
for stream_info in self._fetch_revision_texts(revision_ids):
2491
self._revision_keys = [(rev_id,) for rev_id in revision_ids]
2492
yield self._get_filtered_inv_stream(revision_ids)
2493
yield self._get_text_stream()
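    # Illustrative note (not part of the original source): the stream order
    # matters - revision texts first, then the filtered inventory stream
    # (which computes self._text_keys as a side effect while it is consumed),
    # and only then the text stream that reads those keys. This ordering is
    # what assumption (4) in the class docstring, atomic inserts, relies on.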
class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return repository

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
            repository at a slightly different url
            than normal. I.e. during 'upgrade'.
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    This is pack-1.6.1 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development2')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1 with B+Trees.\n")

    def check_conversion_target(self, target_format):
        pass
class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    1.6.1-subtree[as it might have been] with B+Tree indices.

    This is [now] retained until we have a CHK based subtree format in
    stable use.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    experimental = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use