            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))
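
    # Added note (not in the original source): a delta whose compression
    # parent lives in a *different* pack of the same collection is fine,
    # which is why the check consults the collection's combined indices
    # rather than only this pack's own. A base that exists only in a
    # fallback repository would surface in missing_items, e.g.
    # {'texts': [('file-id', 'rev-id')]}.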

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)],
                unlimited_cache=unlimited_cache))
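
    # Sketch of the effect (added comment, not in the original): once a pack
    # is finished, e.g. _replace_index_with_readonly('text') rebinds
    # self.text_index from the in-memory builder to a read-only
    # self.index_class opened on '<name>.tix' with the recorded size, so
    # later lookups hit the on-disk index. CHK indices ask for an unlimited
    # node cache, a hint that their pages are re-read often enough that
    # eviction would hurt.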


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)


class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
        signature_index, upload_transport, pack_transport, index_transport,
        pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
            inventory_index, text_index, signature_index,
            chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
            self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.rename(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.rename(self.file_name(), new_name)
        self._state = 'finished'

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this index,
        which are not contained *in* this index. They may be present elsewhere.
        """
        return index.external_references(1)
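
    # Illustrative example (added): external_references(1) inspects reference
    # list 1 (the compression-parent list) and returns keys that are
    # referenced but not stored in this index, for instance a delta whose
    # base text was written to an earlier pack of the collection.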


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.

         - stores the index size tuple for the pack in the index_sizes
           attribute.
        """
        self.finish_content()
        if not suspend:
            self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller. On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision', suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release.

    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices. The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self, reload_func=None, flush_func=None):
        """Create an AggregateIndex.

        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True if the list of
            active pack files has changed.
        """
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        self.add_callback = None

    def add_index(self, index, pack):

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
                upload_suffix=self.suffix,
                file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack
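
    # Added note: set_optimize(combine_backing_indices=False) is safe here
    # because the Packer streams nodes in sorted order and never queries the
    # new pack's indices mid-copy, so nothing is gained by merging the
    # builders' spilled-to-disk backing indices before the pack is finished.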

    def _update_pack_order(self, entries, index_to_pack_map):
        """Determine how we want our packs to be ordered.

        This changes the sort order of the self.packs list so that packs unused
        by 'entries' will be at the end of the list, so that future requests
        can avoid probing them. Used packs will be at the front of the
        self.packs list, in the order of their first use in 'entries'.

        :param entries: A list of (index, ...) tuples
        :param index_to_pack_map: A mapping from index objects to pack objects.
        """
        packs = []
        seen_indexes = set()
        for entry in entries:
            index = entry[0]
            if index not in seen_indexes:
                packs.append(index_to_pack_map[index])
                seen_indexes.add(index)
        if len(packs) == len(self.packs):
            if 'pack' in debug.debug_flags:
                mutter('Not changing pack list, all packs used.')
            return
        seen_packs = set(packs)
        for pack in self.packs:
            if pack not in seen_packs:
                packs.append(pack)
        if 'pack' in debug.debug_flags:
            old_names = [p.access_tuple()[1] for p in self.packs]
            new_names = [p.access_tuple()[1] for p in packs]
            mutter('Reordering packs\nfrom: %s\n  to: %s',
                   old_names, new_names)
        self.packs = packs
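
    # Worked example (hypothetical packs): with self.packs == [p1, p2, p3]
    # and 'entries' touching indices of p3 then p1, the list is reordered to
    # [p3, p1, p2], so later requests probe the packs that actually served
    # data before falling back to the unused ones.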

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""

        self._pack_collection.allocate(new_pack)

    def _copy_chks(self, refs=None):
        # XXX: Todo, recursive follow-pointers facility when fetching some
        # revisions only.
        chk_index_map, chk_indices = self._pack_map_and_index_list(
            'chk_index')
        chk_nodes = self._index_contents(chk_indices, refs)
        new_refs = set()
        # TODO: This isn't strictly tasteful as we are accessing some private
        # variables (_serializer). Perhaps a better way would be to have
        # Repository._deserialise_chk_node()
        search_key_func = chk_map.search_key_registry.get(
            self._pack_collection.repo._serializer.search_key_name)
        def accumlate_refs(lines):
            # XXX: move to a generic location
            bytes = ''.join(lines)
            node = chk_map._deserialise(bytes, ("unknown",), search_key_func)
            new_refs.update(node.refs())
        self._copy_nodes(chk_nodes, chk_index_map, self.new_pack._writer,
            self.new_pack.chk_index, output_lines=accumlate_refs)
        return new_refs
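
    # Added note: calling _copy_chks(refs=None) copies every CHK node in the
    # source packs in one pass; the returned new_refs set is what a future
    # recursive variant (the XXX above) would feed back in to follow child
    # pointers when fetching only some revisions.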

    def _copy_nodes(self, nodes, index_map, writer, write_index,
        output_lines=None):
        """Copy knit nodes between packs with no graph references.

        :param output_lines: Output full texts of copied items.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb, output_lines=output_lines)
        finally:
            pb.finished()

    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb,
        output_lines=None):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:
            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            pack_obj = index_map[index]
            transport, path = pack_obj.access_tuple()
            try:
                reader = pack.make_readv_reader(transport, path,
                    [offset[0:2] for offset in pack_readv_requests])
            except errors.NoSuchFile:
                if self._reload_func is not None:
                    self._reload_func()
                raise
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                if output_lines is not None:
                    output_lines(knit._parse_record(key[-1], raw_data)[0])
                else:
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1
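
    # Descriptive note (added): the loop above copies each record's raw bytes
    # unchanged via writer.add_bytes_record(); only the index entry (key, eol
    # flag, new offset/length) is rewritten, so repacking never recompresses
    # the stored texts.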


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = NewPack
    resumed_pack_factory = ResumedPack

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        # XXX: This should call self.reset()
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        self._resumed_packs = []

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)
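
    # Structure sketch (added comment): the collection keeps one
    # AggregateIndex per index type; each wraps a CombinedGraphIndex over the
    # per-pack GraphIndex objects, and the sibling wiring above lets a hit in
    # one combined index suggest a pack search order to the others.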

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)
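
    # Usage sketch (added, assumes a write-locked repository): callers such
    # as _resume_pack() construct a Pack and then call
    # add_pack_to_memory(pack) so that queries immediately see the pack's
    # indices through the aggregate indices above.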

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

            #  group their data with the relevant commit, and that may
            #  involve rewriting ancient history - which autopack tries to
            #  avoid. Alternatively we could not group the data but treat
            #  each of these as having a single revision, and thus add
            #  one revision for each to the total revision count, to get
            #  a matching distribution.
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
                                      reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result

    def _execute_pack_operations(self, pack_operations, _packer_class=Packer,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = _packer_class(self, packs, '.autopack',
                                   reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)
        if clean_obsolete_packs:
            self._clear_obsolete_packs()
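
    # Example (added): repo.pack() funnels into this method. With hint=None
    # every pack is combined into a single new pack via OptimisingPacker;
    # with hint=['<pack-name>', ...] only the named packs are repacked.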

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', unlimited_cache=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             unlimited_cache=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile, e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.
        :return: An iterator of the index contents.
        """
        return self._index_class(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix, resume=False, unlimited_cache=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        return self._index_class(transport, index_name, index_size,
                                 unlimited_cache=unlimited_cache)

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.

        :param return: None.
        """
        for pack in packs:
            try:
                pack.pack_transport.rename(pack.file_name(),
                    '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError), e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.rename(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError), e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions.keys():
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        # cached chk data
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes)
            disk_nodes    The final set of nodes that should be referenced
            deleted_nodes Nodes which have been removed from when we started
            new_nodes     Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))
        orig_disk_nodes = set(disk_nodes)

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.iteritems():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when we
        # locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes
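
    # Worked example (hypothetical names): if we repacked 'a' into a new pack
    # 'd' while another process independently added 'c', then deleted_nodes
    # holds 'a', new_nodes holds 'd', and the returned disk_nodes is the
    # merged view containing 'b', 'c' and 'd' (entries are ((name,),
    # sizes-string) tuples).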

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never fail except
                    # on misbehaving disks ;)
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = None
                if obsolete_packs:
                    to_preserve = set([o.name for o in obsolete_packs])
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. Its a bit of a bandaid, and
        # causes two reads of pack-names, but its a rare corner case not struck
        # with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        for filename in obsolete_pack_transport.list_dir('.'):
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)

        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone. But if they're not there we shouldn't fail in this
            # case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found. If the list is
            empty no problems were found.
        """
        # The base implementation does no checks. GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []
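
    # Flow sketch (added): commit finishes the write group's NewPack and any
    # resumed packs, saves pack-names, and returns the list of new pack
    # names - the value the repository layer hands back as the 'hint' that
    # can later be passed to pack(hint=...) for a targeted repack.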

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph        |  HPSS    | Repository public layer |

        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _warn_if_deprecated(self, branch=None):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            super(KnitPackRepository, self)._warn_if_deprecated(branch)

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _get_source(self, to_format):
        if to_format.network_name() == self._format.network_name():
            return KnitPackStreamSource(self, to_format)
        return super(KnitPackRepository, self)._get_source(to_format)

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
            transaction = self._transaction
            self._transaction = None
            transaction.finish()
        else:
            self.control_files.unlock()

        if not self.is_locked():
            for repo in self._fallback_repositories:
                repo.unlock()
class KnitPackStreamSource(StreamSource):
2425
"""A StreamSource used to transfer data between same-format KnitPack repos.
2427
This source assumes:
2428
1) Same serialization format for all objects
2429
2) Same root information
2430
3) XML format inventories
2431
4) Atomic inserts (so we can stream inventory texts before text
2436
def __init__(self, from_repository, to_format):
2437
super(KnitPackStreamSource, self).__init__(from_repository, to_format)
2438
self._text_keys = None
2439
self._text_fetch_order = 'unordered'
2441
def _get_filtered_inv_stream(self, revision_ids):
2442
from_repo = self.from_repository
2443
parent_ids = from_repo._find_parent_ids_of_revisions(revision_ids)
2444
parent_keys = [(p,) for p in parent_ids]
2445
find_text_keys = from_repo._find_text_key_references_from_xml_inventory_lines
2446
parent_text_keys = set(find_text_keys(
2447
from_repo._inventory_xml_lines_for_keys(parent_keys)))
2448
content_text_keys = set()
2449
knit = KnitVersionedFiles(None, None)
2450
factory = KnitPlainFactory()
2451
def find_text_keys_from_content(record):
2452
if record.storage_kind not in ('knit-delta-gz', 'knit-ft-gz'):
2453
raise ValueError("Unknown content storage kind for"
2454
" inventory text: %s" % (record.storage_kind,))
2455
# It's a knit record, it has a _raw_record field (even if it was
2456
# reconstituted from a network stream).
2457
raw_data = record._raw_record
2458
# read the entire thing
2459
revision_id = record.key[-1]
2460
content, _ = knit._parse_record(revision_id, raw_data)
2461
if record.storage_kind == 'knit-delta-gz':
2462
line_iterator = factory.get_linedelta_content(content)
2463
elif record.storage_kind == 'knit-ft-gz':
2464
line_iterator = factory.get_fulltext_content(content)
2465
content_text_keys.update(find_text_keys(
2466
[(line, revision_id) for line in line_iterator]))
2467
revision_keys = [(r,) for r in revision_ids]
2468
def _filtered_inv_stream():
2469
source_vf = from_repo.inventories
2470
stream = source_vf.get_record_stream(revision_keys,
2472
for record in stream:
2473
if record.storage_kind == 'absent':
2474
raise errors.NoSuchRevision(from_repo, record.key)
2475
find_text_keys_from_content(record)
2477
self._text_keys = content_text_keys - parent_text_keys
2478
return ('inventories', _filtered_inv_stream())
2480
def _get_text_stream(self):
2481
# Note: We know we don't have to handle adding root keys, because both
2482
# the source and target are the identical network name.
2483
text_stream = self.from_repository.texts.get_record_stream(
2484
self._text_keys, self._text_fetch_order, False)
2485
return ('texts', text_stream)
2487
def get_stream(self, search):
2488
revision_ids = search.get_keys()
2489
for stream_info in self._fetch_revision_texts(revision_ids):
2491
self._revision_keys = [(rev_id,) for rev_id in revision_ids]
2492
yield self._get_filtered_inv_stream(revision_ids)
2493
yield self._get_text_stream()
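
    # Ordering note (added): the inventory stream must be consumed before
    # _get_text_stream() is useful, because _filtered_inv_stream() only
    # populates self._text_keys as a side effect of walking the inventory
    # records; get_stream() therefore yields the streams in that order.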


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 5 (adds stacking support, requires bzr 1.6)"


class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
    """A repository with rich roots and stacking.

    New in release 1.6.1.

    Supports stacking on other repositories, allowing data to be accessed
    without being stored locally.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"

    def get_format_description(self):
        return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"


class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
    """A repository with rich roots and external references.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.

    This format was deprecated because the serializer it uses accidentally
    supported subtrees, when the format was not intended to. This meant that
    someone could accidentally fetch from an incorrect repository.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        matching = bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')
        matching.repository_format = self
        return matching

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"

    def get_format_description(self):
        return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
            " (deprecated)")


class RepositoryFormatKnitPack6(RepositoryFormatPack):
    """A repository with stacking and btree indexes,
    without rich roots or subtrees.

    This is equivalent to pack-1.6 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('1.9')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 6 (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment0(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.0.

    No changes to the disk behaviour from pack-0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development0')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        pass

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 0 (needs bzr.dev from before 1.3)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92\n")


class RepositoryFormatKnitPack6RichRoot(RepositoryFormatPack):
    """A repository with rich roots, no subtrees, stacking and btree indexes.

    1.6-rich-root with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.9-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n"

    def get_format_description(self):
        return "Packs 6 rich-root (uses btree indexes, requires bzr 1.9)"


class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    1.6.1-subtree[as it might have been] with B+Tree indices.

    This is [now] retained until we have a CHK based subtree format in
    development.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 2 with subtree support "
            "(needs bzr.dev from before 1.8)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1-subtree with B+Tree indices.\n")


class RepositoryFormatPackDevelopment0Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.0.

    No changes to the disk behaviour from pack-0.92-subtree.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    _serializer = xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development0-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 0 with subtree support "
            "(needs bzr.dev from before 1.3)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92-subtree\n")


class RepositoryFormatPackDevelopment1(RepositoryFormatPackDevelopment0):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.5.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development1')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 1 (needs bzr.dev from before 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92 with external reference support.\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment1Subtree(RepositoryFormatPackDevelopment0Subtree):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.5.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development1-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 1 with subtree support "
            "(needs bzr.dev from before 1.6)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92-subtree with external reference support.\n")