            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()
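
    # A minimal usage sketch (hypothetical names): access_tuple() pairs the
    # transport holding the pack with its on-disk file name, so a caller can
    # read pack bytes without knowing where the pack currently lives:
    #
    #   transport, name = a_pack.access_tuple()
    #   content = transport.get_bytes(name)  # e.g. reads '<md5-name>.pack'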

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))
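
    # For illustration (hypothetical keys): if this pack holds a delta for
    # ('file-id', 'rev-2') whose compression base ('file-id', 'rev-1') exists
    # in neither this pack nor the rest of the collection, this check raises
    # BzrCheckError with a report like:
    #
    #   Newly created pack file ... has delta references to items not in its
    #   repository:
    #   {'texts': [('file-id', 'rev-1')]}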

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)],
                unlimited_cache=unlimited_cache))
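
    # Sketch of the swap above (assuming a finished pack named 'aaaa' whose
    # index_sizes are already populated): after
    #
    #   a_pack._replace_index_with_readonly('revision')
    #
    # a_pack.revision_index is a readonly index_class(index_transport,
    # 'aaaa.rix', <recorded size>) instead of the writable in-memory index.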

class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)

class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this
        index, which are not contained *in* this index. They may be present
        elsewhere.
        """
        return index.external_references(1)
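
    # external_references(1) asks the index for keys named in reference list 1
    # (the compression-parent list) that have no entry of their own in this
    # index. A sketch with hypothetical keys:
    #
    #   refs = a_pack._get_external_refs(a_pack.text_index)
    #   # e.g. set([('file-id', 'rev-0')]) when a delta's base text lives in
    #   # some other pack of the collection.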

class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'chk': ('.cix', 4),
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count() or
            (self.chk_index is not None and self.chk_index.key_count()))

    def finish_content(self):
        if self.name is not None:
            return
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()

    def finish(self, suspend=False):
        """Finish the new pack.

        This:
         - finalises the content
         - writes out the associated indices
         - renames the pack into place
         - stores the index size tuple for the pack in the index_sizes
           attribute.
        """
        self.finish_content()
        if not suspend:
            self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller. On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision', suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        new_name = self.name + '.pack'
        if not suspend:
            new_name = '../packs/' + new_name
        self.upload_transport.rename(self.random_name, new_name)
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                new_name, time.time() - self.start_time)
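
    # Naming sketch: self.name is the hex md5 of every byte written through
    # _write_data, so finished packs are content-addressed. Assuming a digest
    # of 'd41d8cd98f00b204e9800998ecf8427e', the temporary upload file is
    # renamed to '../packs/d41d8cd98f00b204e9800998ecf8427e.pack' (or kept in
    # the upload directory when suspend=True).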

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def _get_external_refs(self, index):
        return index._external_references()

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + NewPack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in an index_sizes array for a given index type."""
        return NewPack.index_definitions[index_type][1]
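
    # Sketch tying the two helpers together (values from index_definitions):
    #
    #   self.index_name('revision', 'aaaa')  # -> 'aaaa.rix'
    #   self.index_offset('revision')        # -> 0 (slot in index_sizes)
    #   self.index_name('text', 'aaaa')      # -> 'aaaa.tix'
    #   self.index_offset('text')            # -> 2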

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label, suspend=False):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        if suspend:
            transport = self.upload_transport
        else:
            transport = self.index_transport
        self.index_sizes[self.index_offset(index_type)] = transport.put_file(
            index_name, index.finish(), mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)

class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    the index-to-pack mapping, and the list of all indices for a specific
    type of index such as 'revision index'.
    """

    def __init__(self, reload_func=None, flush_func=None):
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        self.add_callback = None

    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        This should probably not be used eventually; rather, incremental add
        and removal of indices should be preferred. It has been added during
        refactoring of existing code.

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        # any, otherwise None.
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index, pack.name)
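
    # Because new indices are inserted at position 0, a lookup sketch such as
    #
    #   self.combined_index.iter_entries([some_key])
    #
    # probes the most recently added pack first. Recently written data tends
    # to be the most frequently queried, so this ordering keeps the common
    # case cheap.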

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time. Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" %
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        del self.combined_index._index_names[:]
        self.add_callback = None

    def remove_index(self, index):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        """
        del self.index_to_pack[index]
        pos = self.combined_index._indices.index(index)
        del self.combined_index._indices[pos]
        del self.combined_index._index_names[pos]
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))

    def _extra_init(self):
        """A template hook to allow extending the constructor trivially."""

    def _pack_map_and_index_list(self, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the preferred
            access order.
        """
        indices = []
        pack_map = {}
        for pack_obj in self.packs:
            index = getattr(pack_obj, index_attribute)
            indices.append(index)
            pack_map[index] = pack_obj
        return pack_map, indices

    def _index_contents(self, indices, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param indices: The list of indices to query
        :param key_filter: An optional filter to limit the keys returned.
        """
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)
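
    # Usage sketch (hypothetical keys): both forms yield graph-index entry
    # tuples; the filtered form only touches index pages that could contain
    # the requested keys.
    #
    #   all_entries = self._index_contents(indices)
    #   some_entries = self._index_contents(indices, [('rev-1',), ('rev-2',)])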

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.
        """

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
            upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack

    def _update_pack_order(self, entries, index_to_pack_map):
        """Determine how we want our packs to be ordered.

        This changes the sort order of the self.packs list so that packs unused
        by 'entries' will be at the end of the list, so that future requests
        can avoid probing them. Used packs will be at the front of the
        self.packs list, in the order of their first use in 'entries'.

        :param entries: A list of (index, ...) tuples
        :param index_to_pack_map: A mapping from index objects to pack objects.
        """
        packs = []
        seen_indexes = set()
        for entry in entries:
            index = entry[0]
            if index not in seen_indexes:
                packs.append(index_to_pack_map[index])
                seen_indexes.add(index)
        if len(packs) == len(self.packs):
            if 'pack' in debug.debug_flags:
                mutter('Not changing pack list, all packs used.')
            return
        seen_packs = set(packs)
        for pack in self.packs:
            if pack not in seen_packs:
                packs.append(pack)
                seen_packs.add(pack)
        if 'pack' in debug.debug_flags:
            old_names = [p.access_tuple()[1] for p in self.packs]
            new_names = [p.access_tuple()[1] for p in packs]
            mutter('Reordering packs\nfrom: %s\n to:   %s',
                   old_names, new_names)
        self.packs = packs
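
    # Ordering sketch with hypothetical packs A, B and C: if 'entries' touches
    # only C and then B, self.packs becomes [C, B, A], so the unused pack A is
    # probed last by later requests.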

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""

        self._pack_collection.allocate(new_pack)

    def _copy_chks(self, refs=None):
        # XXX: Todo, recursive follow-pointers facility when fetching some
        # revisions only.
        chk_index_map, chk_indices = self._pack_map_and_index_list(
            'chk_index')
        chk_nodes = self._index_contents(chk_indices, refs)
        new_refs = set()
        # TODO: This isn't strictly tasteful as we are accessing some private
        #       variables (_serializer). Perhaps a better way would be to have
        #       Repository._deserialise_chk_node()
        search_key_func = chk_map.search_key_registry.get(
            self._pack_collection.repo._serializer.search_key_name)
        def accumlate_refs(lines):
            # XXX: move to a generic location
            bytes = ''.join(lines)
            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
            new_refs.update(node.refs())
        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
            self.new_pack.chk_index, output_lines=accumlate_refs)
        return new_refs

    def _copy_nodes(self, nodes, index_map, writer, write_index,
        output_lines=None):
        """Copy knit nodes between packs with no graph references.

        :param output_lines: Output full texts of copied items.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb, output_lines=output_lines)
        finally:
            pb.finished()

    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
        output_lines=None):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:
        # group by pack
        nodes = sorted(nodes)
        request_groups = {}
        for index, key, value in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value))
        record_index = 0
        pb.update("Copied record", record_index, len(nodes))
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append((offset, length, (key, value[0])))
            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path,
                    [offset[0:2] for offset in pack_readv_requests])
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                if output_lines is not None:
                    output_lines(knit._parse_record(key[-1], raw_data)[0])
                else:
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1

    def _do_copy_nodes_graph(self, index_map, writer, write_index,
        output_lines, pb, readv_group_iter, total_items):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # for line extraction when requested (inventories only)
        if output_lines:
            factory = KnitPlainFactory()
        record_index = 0
        pb.update("Copied record", record_index, total_items)
        for index, readv_vector, node_vector in readv_group_iter:
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path, readv_vector)
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (key, eol_flag, references) in \
                izip(reader.iter_records(), node_vector):
                raw_data = read_func(None)
                if output_lines:
                    # read the entire thing
                    content, _ = knit._parse_record(key[-1], raw_data)
                    if len(references[-1]) == 0:
                        line_iterator = factory.get_fulltext_content(content)
                    else:
                        line_iterator = factory.get_linedelta_content(content)
                    for line in line_iterator:
                        yield line, key
                else:
                    # check the header only
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size),
                    (references,))
                pb.update("Copied record", record_index)
                record_index += 1

    def _get_text_nodes(self):
        text_index_map, text_indices = self._pack_map_and_index_list(
            'text_index')
        return text_index_map, self._index_contents(text_indices,
            self._text_filter)

    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to
            perform readvs to obtain the data for nodes. Each item yielded by
            the iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack

class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = NewPack
    resumed_pack_factory = ResumedPack

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        # resumed packs
        self._resumed_packs = []

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of pack objects
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result

    def autopack(self):
        """Pack the pack collection incrementally.

        If autopacking takes place then the packs name collection will have
        been flushed to disk - and thus this must only be done in
        synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: Something evaluating true if packing took place.
        """
        while True:
            try:
                return self._do_autopack()
            except errors.RetryAutopack:
                # If we get a RetryAutopack exception, we should abort the
                # current action, and retry.
                pass

    def _do_autopack(self):
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return None
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []
        for pack in self.all_packs():
            revision_count = pack.get_revision_count()
            if revision_count == 0:
                # revision less packs are not generated by normal operation,
                # only by operations like sign-my-commits, and thus will not
                # tend to grow rapidly or without bound like commit containing
                # packs do - leave them alone as packing them really should
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
                                      reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result
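
    # Worked example of the threshold used above (digit-based distribution,
    # see pack_distribution and _max_pack_count): with 2531 revisions the
    # ideal layout is two packs of 1000 revisions, five of 100, three of 10
    # and one of 1, so _max_pack_count(2531) is 2 + 5 + 3 + 1 = 11; autopack
    # only repacks when the repository holds more packs than that.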

    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = _packer_class(self, packs, '.autopack',
                                   reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)
        if clean_obsolete_packs:
            self._clear_obsolete_packs()

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix',
                                             unlimited_cache=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             unlimited_cache=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection
            of all packs.
        """

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.
        :return: An iterator of the index contents.
        """
        return self._index_class(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        return self._index_class(transport, index_name, index_size,
                                 unlimited_cache=unlimited_cache)
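
    # Transport selection sketch: a resumed pack's indices still live in the
    # upload directory (so their size is stat()ed from disk), while committed
    # indices live in the indices directory with sizes recorded in pack-names:
    #
    #   self._make_index('aaaa', '.rix')               # indices/aaaa.rix
    #   self._make_index('aaaa', '.rix', resume=True)  # upload/aaaa.rix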

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        :return: None.
        """
        for pack in packs:
            try:
                pack.pack_transport.rename(pack.file_name(),
                    '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.rename(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        # cached chk data
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _save_pack_names(self, clear_obsolete_packs=False,
                         obsolete_packs=None):
        """Save the list of packs.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a band-aid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # This is functionally similar to _save_pack_names, but we don't write
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack')
            that were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone. But if they're not there we shouldn't fail in this
            # case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found. If the list is
            empty no problems were found.
        """
        # The base implementation does no checks. GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                 % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)

class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |
            deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        if _format.supports_chks:
            # No graph, no compression:- references from chks are between
            # different objects not temporal versions of the same; and without
            # some sort of temporal structure knit compression will just fail.
            self.chk_bytes = KnitVersionedFiles(
                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
                    add_callback=self._pack_collection.chk_index.add_callback,
                    deltas=False, parents=False, is_locked=self.is_locked),
                data_access=self._pack_collection.chk_index.data_access,
                max_delta_chain=0)
        else:
            self.chk_bytes = None
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _warn_if_deprecated(self, branch=None):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            super(KnitPackRepository, self)._warn_if_deprecated(branch)

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _get_source(self, to_format):
        if to_format.network_name() == self._format.network_name():
            return KnitPackStreamSource(self, to_format)
        return super(KnitPackRepository, self)._get_source(to_format)

    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: an iterator yielding tuples of (revision-id,
            parents-in-index, parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the
            # location in each index of the revision text so we can perform
            # linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,
                            rev_parents))
        finally:
            pb.finished()
        return result

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()
1894
def _start_write_group(self):
2306
1895
self._pack_collection._start_write_group()
2308
1897
def _commit_write_group(self):
2309
hint = self._pack_collection._commit_write_group()
2310
self.revisions._index._key_dependencies.clear()
2313
def suspend_write_group(self):
2314
# XXX check self._write_group is self.get_transaction()?
2315
tokens = self._pack_collection._suspend_write_group()
2316
self.revisions._index._key_dependencies.clear()
2317
self._write_group = None
2320
def _resume_write_group(self, tokens):
2321
self._start_write_group()
2323
self._pack_collection._resume_write_group(tokens)
2324
except errors.UnresumableWriteGroup:
2325
self._abort_write_group()
2327
for pack in self._pack_collection._resumed_packs:
2328
self.revisions._index.scan_unvalidated_index(pack.revision_index)
1898
return self._pack_collection._commit_write_group()

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        locked = self.is_locked()
        if not self._write_lock_count and locked:
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()

    def lock_read(self):
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)
1969
transaction = self._transaction
2414
1970
self._transaction = None
2415
1971
transaction.finish()
1972
for repo in self._fallback_repositories:
2417
1975
self.control_files.unlock()
2419
if not self.is_locked():
2420
1976
for repo in self._fallback_repositories:
2424
class KnitPackStreamSource(StreamSource):
2425
"""A StreamSource used to transfer data between same-format KnitPack repos.
2427
This source assumes:
2428
1) Same serialization format for all objects
2429
2) Same root information
2430
3) XML format inventories
2431
4) Atomic inserts (so we can stream inventory texts before text
2436
def __init__(self, from_repository, to_format):
2437
super(KnitPackStreamSource, self).__init__(from_repository, to_format)
2438
self._text_keys = None
2439
self._text_fetch_order = 'unordered'
2441
def _get_filtered_inv_stream(self, revision_ids):
2442
from_repo = self.from_repository
2443
parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
2444
parent_keys = [(p,) for p in parent_ids]
2445
find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
2446
parent_text_keys = set(find_text_keys(
2447
from_repo._inventory_xml_lines_for_keys(parent_keys)))
2448
content_text_keys = set()
2449
knit = KnitVersionedFiles(None, None)
2450
factory = KnitPlainFactory()
2451
def find_text_keys_from_content(record):
2452
if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
2453
raise ValueError("Unknown content storage kind for"
2454
" inventory text: %s" % (record.storage_kind,))
2455
# It's a knit record, it has a _raw_record field (even if it was
2456
# reconstituted from a network stream).
2457
raw_data = record._raw_record
2458
# read the entire thing
2459
revision_id = record.key[-1]
2460
content, _ = knit._parse_record(revision_id, raw_data)
2461
if record.storage_kind == 'knit-delta-gz':
2462
line_iterator = factory.get_linedelta_content(content)
2463
elif record.storage_kind == 'knit-ft-gz':
2464
line_iterator = factory.get_fulltext_content(content)
2465
content_text_keys.update(find_text_keys(
2466
[(line, revision_id) for line in line_iterator]))
2467
revision_keys = [(r,) for r in revision_ids]
2468
def _filtered_inv_stream():
2469
source_vf = from_repo.inventories
2470
stream = source_vf.get_record_stream(revision_keys,
2472
for record in stream:
2473
if record.storage_kind == 'absent':
2474
raise errors.NoSuchRevision(from_repo, record.key)
2475
find_text_keys_from_content(record)
2477
self._text_keys = content_text_keys - parent_text_keys
2478
return ('inventories', _filtered_inv_stream())
2480
def _get_text_stream(self):
2481
# Note: We know we don't have to handle adding root keys, because both
2482
# the source and target are the identical network name.
2483
text_stream = self.from_repository.texts.get_record_stream(
2484
self._text_keys, self._text_fetch_order, False)
2485
return ('texts', text_stream)
2487
def get_stream(self, search):
2488
revision_ids = search.get_keys()
2489
for stream_info in self._fetch_revision_texts(revision_ids):
2491
self._revision_keys = [(rev_id,) for rev_id in revision_ids]
2492
yield self._get_filtered_inv_stream(revision_ids)
2493
yield self._get_text_stream()
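
    # Stream-order sketch: revision (and signature) texts come first, then
    # inventories - during which self._text_keys is computed as a side effect
    # - and finally only the texts those inventories actually introduce. A
    # hypothetical consumer (insert_substream is illustrative, not a real
    # bzrlib API):
    #
    #   for substream_kind, substream in source.get_stream(search):
    #       sink.insert_substream(substream_kind, substream)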

class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.
    """

    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository."""
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return repository

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
            repository at a slightly different url
            than normal. I.e. during 'upgrade'.
        """
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    This is pack-1.6.1 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development2')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1 with B+Trees.\n")

    def check_conversion_target(self, target_format):
        pass

class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    1.6.1-subtree[as it might have been] with B+Tree indices.

    This is [now] retained until we have a CHK based subtree format in
    stable use.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use