            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)],
                unlimited_cache=unlimited_cache))
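    # Illustrative note (not part of the original source): index_name() and
    # index_offset() encode the on-disk convention used throughout this file:
    # for a pack named 'a1b2c3' the text index is 'a1b2c3.tix', and its size
    # lives at position 2 of index_sizes ('.rix'=0, '.iix'=1, '.tix'=2,
    # '.six'=3, and '.cix'=4 when a chk index is in use).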


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)


class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'
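    # Illustrative note (not part of the original source): a resumed pack
    # lives under upload/ while _state == 'resumed'; finish() renames its
    # indices into ../indices/ and the .pack file into ../packs/, after which
    # access_tuple() serves the final location.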

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this
        index, which are not contained *in* this index. They may be present
        elsewhere.
        """
        return index.external_references(1)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'chk': ('.cix', 4),
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: compression and parents lists.
            index_builder_class(reference_lists=2),
            # Texts: per-file graph and compression - two reference lists and
            # two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index,
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
         - stores the index size tuple for the pack in the index_sizes
           attribute.
        """
        self.finish_content()
        if not suspend:
            self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller. On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision', suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def _get_external_refs(self, index):
        return index._external_references()

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + NewPack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in an index_size array for a given index type."""
        return NewPack.index_definitions[index_type][1]

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label, suspend=False):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        if suspend:
            transport = self.upload_transport
        else:
            transport = self.index_transport
        self.index_sizes[self.index_offset(index_type)] = transport.put_file(
            index_name, index.finish(), mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices. The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self, reload_func=None, flush_func=None):
        """Create an AggregateIndex.

        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True if the list of
            active pack files has changed.
        """
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        # the current add nodes callback for the current writable index if
        # any.
        self.add_callback = None
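    # Illustrative usage (not part of the original source):
    # RepositoryPackCollection builds one AggregateIndex per index type, e.g.
    #   self.revision_index = AggregateIndex(self.reload_pack_names, flush)
    # so a query that hits a vanished pack can call reload_func() and retry
    # against the refreshed pack list.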

    def add_index(self, index, pack):

    def _extra_init(self):
        """A template hook to allow extending the constructor trivially."""

    def _pack_map_and_index_list(self, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the preferred
            access order.
        """
        indices = []
        pack_map = {}
        for pack_obj in self.packs:
            index = getattr(pack_obj, index_attribute)
            indices.append(index)
            pack_map[index] = pack_obj
        return pack_map, indices

    def _index_contents(self, indices, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param indices: The list of indices to query
        :param key_filter: An optional filter to limit the keys returned.
        """
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
            upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done.
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack
def _update_pack_order(self, entries, index_to_pack_map):
765
"""Determine how we want our packs to be ordered.
767
This changes the sort order of the self.packs list so that packs unused
768
by 'entries' will be at the end of the list, so that future requests
769
can avoid probing them. Used packs will be at the front of the
770
self.packs list, in the order of their first use in 'entries'.
772
:param entries: A list of (index, ...) tuples
773
:param index_to_pack_map: A mapping from index objects to pack objects.
777
for entry in entries:
779
if index not in seen_indexes:
780
packs.append(index_to_pack_map[index])
781
seen_indexes.add(index)
782
if len(packs) == len(self.packs):
783
if 'pack' in debug.debug_flags:
784
mutter('Not changing pack list, all packs used.')
786
seen_packs = set(packs)
787
for pack in self.packs:
788
if pack not in seen_packs:
791
if 'pack' in debug.debug_flags:
792
old_names = [p.access_tuple()[1] for p in self.packs]
793
new_names = [p.access_tuple()[1] for p in packs]
794
mutter('Reordering packs\nfrom: %s\n to: %s',
795
old_names, new_names)
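    # Illustrative example (not part of the original source): with
    # self.packs == [A, B, C] and entries touching indices in the order
    # B, B, A, the list is reordered to [B, A, C] - first-use order, with the
    # untouched pack C probed last.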

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""

        self._pack_collection.allocate(new_pack)

    def _copy_chks(self, refs=None):
        # XXX: Todo, recursive follow-pointers facility when fetching some
        # revisions only.
        chk_index_map, chk_indices = self._pack_map_and_index_list(
            'chk_index')
        chk_nodes = self._index_contents(chk_indices, refs)
        new_refs = set()
        # TODO: This isn't strictly tasteful as we are accessing some private
        #       variables (_serializer). Perhaps a better way would be to have
        #       Repository._deserialise_chk_node()
        search_key_func = chk_map.search_key_registry.get(
            self._pack_collection.repo._serializer.search_key_name)
        def accumulate_refs(lines):
            # XXX: move to a generic location
            bytes = ''.join(lines)
            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
            new_refs.update(node.refs())
        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
            self.new_pack.chk_index, output_lines=accumulate_refs)
        return new_refs

    def _copy_nodes(self, nodes, index_map, writer, write_index,
        output_lines=None):
        """Copy knit nodes between packs with no graph references.

        :param output_lines: Output full texts of copied items.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb, output_lines=output_lines)
        finally:
            pb.finished()

    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
        output_lines=None):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:
            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path,
                    [offset[0:2] for offset in pack_readv_requests])
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                if output_lines is not None:
                    output_lines(knit._parse_record(key[-1], raw_data)[0])
                else:
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1

    def _get_text_nodes(self):
        text_index_map, text_indices = self._pack_map_and_index_list(
            'text_index')
        return text_index_map, self._index_contents(text_indices,
            self._text_filter)

    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to perform
            readvs to obtain the data for nodes. Each item yielded by the
            iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack
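        # Illustrative example (not part of the original source): nodes spread
        # over two packs yield one group per pack, e.g.
        #   (pack1.index, [(0, 100), (300, 50)], [node_a, node_b])
        #   (pack2.index, [(0, 80)], [node_c])
        # so each source pack is read with a single readv() call.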
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        self._resumed_packs = []

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.
        """

    def autopack(self):
        """Pack the pack collection incrementally.

        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: Something evaluating true if packing took place.
        """
        while True:
            try:
                return self._do_autopack()
            except errors.RetryAutopack:
                # If we get a RetryAutopack exception, we should abort the
                # current action, and retry.
                pass

    def _do_autopack(self):
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return None
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []

                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
                                      reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result

    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = _packer_class(self, packs, '.autopack',
                                   reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
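            # Illustrative note (not part of the original source):
            # RetryWithNewPacks signals that the packs being read changed
            # under us (e.g. another process repacked); the half-written new
            # pack is aborted above, and callers such as _do_autopack retry
            # via the reload_func machinery.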
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))
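    # Illustrative note (not part of the original source): for formats whose
    # pack_compresses flag is set, repacking can shrink existing content, so
    # pack() proceeds even when only a single pack file exists.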

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)

        if clean_obsolete_packs:
            self._clear_obsolete_packs()

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix',
                                             unlimited_cache=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             unlimited_cache=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.
        """
        return self._index_class(self.transport, 'pack-names', None
            ).iter_all_entries()

    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        return self._index_class(transport, index_name, index_size,
                                 unlimited_cache=unlimited_cache)
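    # Illustrative example (not part of the original source):
    # _make_index(name, '.tix') opens that pack's text index from the
    # indices/ directory at its recorded size, while resume=True stats and
    # opens the in-progress copy from upload/ instead.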

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        :param packs: The packs to obsolete.
        :return: None.
        """
        for pack in packs:
            try:
                pack.pack_transport.rename(pack.file_name(),
                    '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.rename(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        # cached chk data
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes)
            disk_nodes    The final set of nodes that should be referenced
            deleted_nodes Nodes which have been removed from when we started
            new_nodes     Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))
        orig_disk_nodes = set(disk_nodes)

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present
        # when we locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
def _save_pack_names(self, clear_obsolete_packs=False):
1557
"""Save the list of packs.
1559
This will take out the mutex around the pack names list for the
1560
duration of the method call. If concurrent updates have been made, a
1561
three-way merge between the current list and the current in memory list
1564
:param clear_obsolete_packs: If True, clear out the contents of the
1565
obsolete_packs directory.
1569
builder = self._index_builder_class()
1570
# load the disk nodes across
1572
for index, key, value in self._iter_disk_pack_index():
1573
disk_nodes.add((key, value))
1574
# do a two-way diff against our original content
1575
current_nodes = set()
1576
for name, sizes in self._names.iteritems():
1578
((name, ), ' '.join(str(size) for size in sizes)))
1579
deleted_nodes = self._packs_at_load - current_nodes
1580
new_nodes = current_nodes - self._packs_at_load
1581
disk_nodes.difference_update(deleted_nodes)
1582
disk_nodes.update(new_nodes)
1583
# TODO: handle same-name, index-size-changes here -
1584
# e.g. use the value from disk, not ours, *unless* we're the one
1586
for key, value in disk_nodes:
1587
builder.add_node(key, value)
1588
self.transport.put_file('pack-names', builder.finish(),
1589
mode=self.repo.bzrdir._get_file_mode())
1590
# move the baseline forward
1591
self._packs_at_load = disk_nodes
1592
if clear_obsolete_packs:
1593
self._clear_obsolete_packs()
1595
self._unlock_names()
1596
# synchronise the memory packs list with what we just wrote:
1927
1597
new_names = dict(disk_nodes)
1928
1598
# drop no longer present nodes
1929
1599
for pack in self.all_packs():
1930
1600
if (pack.name,) not in new_names:
1931
removed.append(pack.name)
1932
1601
self._remove_pack_from_memory(pack)
1933
1602
# add new nodes/refresh existing ones
1934
1603
for key, value in disk_nodes:
1948
1617
self._remove_pack_from_memory(self.get_pack_by_name(name))
1949
1618
self._names[name] = sizes
1950
1619
self.get_pack_by_name(name)
1951
modified.append(name)
1954
1622
self._names[name] = sizes
1955
1623
self.get_pack_by_name(name)
1957
return removed, added, modified
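    # Illustrative example (not part of the original source): if another
    # process added pack 'abc' to pack-names while we created pack 'def',
    # _diff_pack_names() returns both in disk_nodes, and the synchronize step
    # above reports removed/added/modified names so the in-memory state
    # matches disk.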

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = set()
                if obsolete_packs:
                    to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False
def _restart_autopack(self):
2038
"""Reload the pack names list, and restart the autopack code."""
2039
if not self.reload_pack_names():
2040
# Re-raise the original exception, because something went missing
2041
# and a restart didn't find it
2043
raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone. But if they're not there we shouldn't fail in this
            # case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found. If the list is
            empty no problems were found.
        """
        # The base implementation does no checks. GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph | HPSS | Repository public layer |
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        if _format.supports_chks:
            # No graph, no compression:- references from chks are between
            # different objects not temporal versions of the same; and without
            # some sort of temporal structure knit compression will just fail.
            self.chk_bytes = KnitVersionedFiles(
                _KnitGraphIndex(self._pack_collection.chk_index.combined_index,
                    add_callback=self._pack_collection.chk_index.add_callback,
                    deltas=False, parents=False, is_locked=self.is_locked),
                data_access=self._pack_collection.chk_index.data_access,
                max_delta_chain=0)
        else:
            self.chk_bytes = None
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _warn_if_deprecated(self, branch=None):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            super(KnitPackRepository, self)._warn_if_deprecated(branch)

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _get_source(self, to_format):
        if to_format.network_name() == self._format.network_name():
            return KnitPackStreamSource(self, to_format)
        return super(KnitPackRepository, self)._get_source(to_format)

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        """Lock the repository for writes.

        :return: A bzrlib.repository.RepositoryWriteLockResult.
        """
        locked = self.is_locked()
        if not self._write_lock_count and locked:
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return RepositoryWriteLockResult(self.unlock, None)

    def lock_read(self):
        """Lock the repository for reads.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()
        return LogicalLockResult(self.unlock)

    def leave_lock_in_place(self):
        # not supported - raise an error
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
        else:
            self.control_files.unlock()

        if not self.is_locked():
            for repo in self._fallback_repositories:


class KnitPackStreamSource(StreamSource):
    """A StreamSource used to transfer data between same-format KnitPack repos.

    This source assumes:
        1) Same serialization format for all objects
        2) Same root information
        3) XML format inventories
        4) Atomic inserts (so we can stream inventory texts before text
           content)
        5) No chk_bytes
    """

    def __init__(self, from_repository, to_format):
        super(KnitPackStreamSource, self).__init__(from_repository, to_format)
        self._text_keys = None
        self._text_fetch_order = 'unordered'

    def _get_filtered_inv_stream(self, revision_ids):
        from_repo = self.from_repository
        parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
        parent_keys = [(p,) for p in parent_ids]
        find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
        parent_text_keys = set(find_text_keys(
            from_repo._inventory_xml_lines_for_keys(parent_keys)))
        content_text_keys = set()
        knit = KnitVersionedFiles(None, None)
        factory = KnitPlainFactory()
        def find_text_keys_from_content(record):
            if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
                raise ValueError("Unknown content storage kind for"
                    " inventory text: %s" % (record.storage_kind,))
            # It's a knit record, it has a _raw_record field (even if it was
            # reconstituted from a network stream).
            raw_data = record._raw_record
            # read the entire thing
            revision_id = record.key[-1]
            content, _ = knit._parse_record(revision_id, raw_data)
            if record.storage_kind == 'knit-delta-gz':
                line_iterator = factory.get_linedelta_content(content)
            elif record.storage_kind == 'knit-ft-gz':
                line_iterator = factory.get_fulltext_content(content)
            content_text_keys.update(find_text_keys(
                [(line, revision_id) for line in line_iterator]))
        revision_keys = [(r,) for r in revision_ids]
        def _filtered_inv_stream():
            source_vf = from_repo.inventories
            stream = source_vf.get_record_stream(revision_keys,
                                                 'unordered', False)
            for record in stream:
                if record.storage_kind == 'absent':
                    raise errors.NoSuchRevision(from_repo, record.key)
                find_text_keys_from_content(record)
                yield record
            self._text_keys = content_text_keys - parent_text_keys
        return ('inventories', _filtered_inv_stream())

    def _get_text_stream(self):
        # Note: We know we don't have to handle adding root keys, because both
        # the source and target are the identical network name.
        text_stream = self.from_repository.texts.get_record_stream(
            self._text_keys, self._text_fetch_order, False)
        return ('texts', text_stream)

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_filtered_inv_stream(revision_ids)
        yield self._get_text_stream()
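    # Illustrative note (not part of the original source): get_stream() yields
    # revisions first, then the filtered inventory stream (which computes
    # self._text_keys as a side effect), then texts - the atomic-insert
    # assumption in the class docstring is what makes this ordering safe.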


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.
    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 6 (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
    """A repository with rich roots, no subtrees, stacking and btree indexes.

    1.6-rich-root with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.9-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"

    def get_format_description(self):
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.