# Copyright (C) 2007-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from __future__ import absolute_import

import re
import sys

from ..lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import time

from breezy import (
    cleanup,
    config,
    debug,
    graph,
    osutils,
    pack,
    ui,
    )
from breezy.index import (
    CombinedGraphIndex,
    GraphIndexPrefixAdapter,
    )
""")
from .. import (
    btree_index,
    errors,
    )
from ..decorators import (
    only_raises,
    )
from ..lock import LogicalLockResult
from ..repository import (
    RepositoryWriteLockResult,
    )
from ..trace import (
    mutter,
    warning,
    )
from ..bzrrepository import (
    RepositoryFormatMetaDir,
    )
from ..sixish import (
    viewitems,
    )
from ..vf_repository import (
    MetaDirVersionedFileRepository,
    MetaDirVersionedFileRepositoryFormat,
    VersionedFileCommitBuilder,
    VersionedFileRootCommitBuilder,
    )


class PackCommitBuilder(VersionedFileCommitBuilder):
    """Subclass of VersionedFileCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        VersionedFileCommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id, lossy=lossy)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return {key[1] for key in self._file_graph.heads(keys)}
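    # Note (illustrative): the text index keys are (file_id, revision_id)
    # tuples, so asking the per-file graph for the heads of
    # [(f, rev_a), (f, rev_b)] returns the subset of those keys that are
    # heads within that file's revision graph; _heads() then strips the
    # keys back down to plain revision ids.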


class PackRootCommitBuilder(VersionedFileRootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        super(PackRootCommitBuilder, self).__init__(repository, parents,
            config, timestamp=timestamp, timezone=timezone,
            committer=committer, revprops=revprops, revision_id=revision_id,
            lossy=lossy)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return {key[1] for key in self._file_graph.heads(keys)}


class Pack(object):
    """An in memory proxy for a pack and its indices.

    This is a base class that is not directly used, instead the classes
    ExistingPack and NewPack are used.
    """

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'chk': ('.cix', 4),
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }
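    # For illustration: a pack is named from the md5 of its content (see
    # NewPack.finish_content), so a pack '0123...cdef' is stored as
    # packs/0123...cdef.pack with its indices alongside as
    # indices/0123...cdef.rix (revisions), .iix (inventories), .tix
    # (file texts), .six (signatures) and, for formats with a chk index,
    # .cix.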

    def __init__(self, revision_index, inventory_index, text_index,
                 signature_index, chk_index=None):
        """Create a pack instance.

        :param revision_index: A GraphIndex for determining what revisions are
            present in the Pack and accessing the locations of their texts.
        :param inventory_index: A GraphIndex for determining what inventories are
            present in the Pack and accessing the locations of their
            texts/deltas.
        :param text_index: A GraphIndex for determining what file texts
            are present in the pack and accessing the locations of their
            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        :param chk_index: A GraphIndex for accessing content by CHK, if the
            pack has one.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index
        self.chk_index = chk_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        missing_items = {}
        for (index_name, external_refs, index) in [
            ('texts',
                self._get_external_refs(self.text_index),
                self._pack_collection.text_index.combined_index),
            ('inventories',
                self._get_external_refs(self.inventory_index),
                self._pack_collection.inventory_index.combined_index),
            ]:
            missing = external_refs.difference(
                k for (idx, k, v, r) in
                index.iter_entries(external_refs))
            if missing:
                missing_items[index_name] = sorted(list(missing))
        if missing_items:
            from pprint import pformat
            raise errors.BzrCheckError(
                "Newly created pack file %r has delta references to "
                "items not in its repository:\n%s"
                % (self, pformat(missing_items)))

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def get_revision_count(self):
        return self.revision_index.key_count()

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + Pack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in a index_size array for a given index type."""
        return Pack.index_definitions[index_type][1]

    def inventory_index_name(self, name):
        """The inv index is the name + .iix."""
        return self.index_name('inventory', name)

    def revision_index_name(self, name):
        """The revision index is the name + .rix."""
        return self.index_name('revision', name)

    def signature_index_name(self, name):
        """The signature index is the name + .six."""
        return self.index_name('signature', name)

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)

    def _replace_index_with_readonly(self, index_type):
        unlimited_cache = False
        if index_type == 'chk':
            unlimited_cache = True
        index = self.index_class(self.index_transport,
            self.index_name(index_type, self.name),
            self.index_sizes[self.index_offset(index_type)],
            unlimited_cache=unlimited_cache)
        if index_type == 'chk':
            index._leaf_factory = btree_index._gcchk_factory
        setattr(self, index_type + '_index', index)


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
                 text_index, signature_index, chk_index=None):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
                      signature_index, chk_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                    signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s.%s object at 0x%x, %s, %s" % (
            self.__class__.__module__, self.__class__.__name__, id(self),
            self.pack_transport, self.name)


class ResumedPack(ExistingPack):

    def __init__(self, name, revision_index, inventory_index, text_index,
                 signature_index, upload_transport, pack_transport,
                 index_transport, pack_collection, chk_index=None):
        """Create a ResumedPack object."""
        ExistingPack.__init__(self, pack_transport, name, revision_index,
                              inventory_index, text_index, signature_index,
                              chk_index=chk_index)
        self.upload_transport = upload_transport
        self.index_transport = index_transport
        self.index_sizes = [None, None, None, None]
        indices = [
            ('revision', revision_index),
            ('inventory', inventory_index),
            ('text', text_index),
            ('signature', signature_index),
            ]
        if chk_index is not None:
            indices.append(('chk', chk_index))
            self.index_sizes.append(None)
        for index_type, index in indices:
            offset = self.index_offset(index_type)
            self.index_sizes[offset] = index._size
        self.index_class = pack_collection._index_class
        self._pack_collection = pack_collection
        self._state = 'resumed'
        # XXX: perhaps check that the .pack file exists?

    def access_tuple(self):
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'resumed':
            return self.upload_transport, self.file_name()
        else:
            raise AssertionError(self._state)

    def abort(self):
        self.upload_transport.delete(self.file_name())
        indices = [self.revision_index, self.inventory_index, self.text_index,
                   self.signature_index]
        if self.chk_index is not None:
            indices.append(self.chk_index)
        for index in indices:
            index._transport.delete(index._name)

    def finish(self):
        self._check_references()
        index_types = ['revision', 'inventory', 'text', 'signature']
        if self.chk_index is not None:
            index_types.append('chk')
        for index_type in index_types:
            old_name = self.index_name(index_type, self.name)
            new_name = '../indices/' + old_name
            self.upload_transport.move(old_name, new_name)
            self._replace_index_with_readonly(index_type)
        new_name = '../packs/' + self.file_name()
        self.upload_transport.move(self.file_name(), new_name)
        self._state = 'finished'
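    # Roughly: finishing a resumed pack promotes the files written under
    # upload/ into their permanent homes (indices/ for the index files,
    # packs/ for the .pack file) and flips the state from 'resumed' to
    # 'finished', after which access_tuple() serves the pack from its
    # final location.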

    def _get_external_refs(self, index):
        """Return compression parents for this index that are not present.

        This returns any compression parents that are referenced by this index,
        which are not contained *in* this index. They may be present elsewhere.
        """
        return index.external_references(1)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g '.autopack'
        :param file_mode: Unix permissions for newly created file.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs, chk_if_in_use)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them. Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # buffer cap?
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'
        # no name until we finish writing the content
        self.name = None
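        # Data flow, for orientation: callers append byte strings via
        # self._write_data (through the ContainerWriter); the closure
        # accumulates them in self._buffer and, once more than
        # self._cache_limit bytes are pending (or an explicit flush is
        # requested), writes them to write_stream and feeds the md5 hash
        # that will eventually name the pack.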

    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count() or
            (self.chk_index is not None and self.chk_index.key_count()))

    def finish_content(self):
        if self.name is not None:
            return
        self._writer.end()
        if self._buffer[1]:
            self._write_data('', flush=True)
        self.name = self._hash.hexdigest()

    def finish(self, suspend=False):
        """Finish the new pack.

        This:
         - finalises the content
         - assigns a name (the md5 of the content, currently)
         - writes out the associated indices
         - renames the pack into place.
         - stores the index size tuple for the pack in the index_sizes
           attribute.
        """
        self.finish_content()
        if not suspend:
            self._check_references()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller. On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision',
            suspend)
        self._write_index('inventory', self.inventory_index, 'inventory',
            suspend)
        self._write_index('text', self.text_index, 'file texts', suspend)
        self._write_index('signature', self.signature_index,
            'revision signatures', suspend)
        if self.chk_index is not None:
            self.index_sizes.append(None)
            self._write_index('chk', self.chk_index,
                'content hash bytes', suspend)
        self.write_stream.close(
            want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync'))
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        new_name = self.name + '.pack'
        if not suspend:
            new_name = '../packs/' + new_name
        self.upload_transport.move(self.random_name, new_name)
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack finished: %s%s->%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                new_name, time.time() - self.start_time)

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def _get_external_refs(self, index):
        return index._external_references()

    def set_write_cache_size(self, size):
        self._cache_limit = size
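    # For orientation: set_write_cache_size() only raises the buffering
    # threshold used by the _write_data closure; nothing hits the write
    # stream until the buffered size exceeds _cache_limit, flush() is
    # called, or the pack is finished.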

    def _write_index(self, index_type, index, label, suspend=False):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        if suspend:
            transport = self.upload_transport
        else:
            transport = self.index_transport
        index_tempfile = index.finish()
        index_bytes = index_tempfile.read()
        write_stream = transport.open_write_stream(index_name,
            mode=self._file_mode)
        write_stream.write(index_bytes)
        write_stream.close(
            want_fdatasync=self._pack_collection.config_stack.get('repository.fdatasync'))
        self.index_sizes[self.index_offset(index_type)] = len(index_bytes)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    Index-To-Pack mapping, and all indices list for a specific type of index
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices. The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024
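
    # Sketch of typical use (illustrative): the pack collection creates one
    # AggregateIndex per index type, calls add_index() for every existing
    # pack's index, and add_writable_index() for the index of the pack
    # currently being written, so queries consult the newest data first.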

    def __init__(self, reload_func=None, flush_func=None):
        """Create an AggregateIndex.

        :param reload_func: A function to call if we find we are missing an
            index. Should have the form reload_func() => True if the list of
            active pack files has changed.
        """
        self._reload_func = reload_func
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([], reload_func=reload_func)
        self.data_access = _DirectPackAccess(self.index_to_pack,
                                             reload_func=reload_func,
                                             flush_func=flush_func)
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index, pack.name)

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time. Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" % \
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        del self.combined_index._index_names[:]
        self.add_callback = None

    def remove_index(self, index):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        """
        del self.index_to_pack[index]
        pos = self.combined_index._indices.index(index)
        del self.combined_index._indices[pos]
        del self.combined_index._index_names[pos]
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))


class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        :param reload_func: A function to call if a pack file/index goes
            missing. The side effect of calling this function should be to
            update self.packs. See also AggregateIndex
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        self._reload_func = reload_func
        # The index layer keys for the revisions being copied. None for 'all
        # revisions'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to %s.pack() while another pack is'
                                  ' being written.'
                                  % (self.__class__.__name__,))
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        if pb is None:
            self.pb = ui.ui_factory.nested_progress_bar()
        else:
            self.pb = pb
        try:
            return self._create_pack_from_packs()
        finally:
            if pb is None:
                self.pb.finished()

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = self._pack_collection.pack_factory(self._pack_collection,
            upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())
        # We know that we will process all nodes in order, and don't need to
        # query, so don't combine any indices spilled to disk until we are done
        new_pack.revision_index.set_optimize(combine_backing_indices=False)
        new_pack.inventory_index.set_optimize(combine_backing_indices=False)
        new_pack.text_index.set_optimize(combine_backing_indices=False)
        new_pack.signature_index.set_optimize(combine_backing_indices=False)
        return new_pack

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        raise NotImplementedError(self._copy_revision_texts)

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        raise NotImplementedError(self._copy_inventory_texts)

    def _copy_text_texts(self):
        raise NotImplementedError(self._copy_text_texts)

    def _create_pack_from_packs(self):
        raise NotImplementedError(self._create_pack_from_packs)

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    pack_factory = None
    resumed_pack_factory = None
    normal_packer_class = None
    optimising_packer_class = None

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class,
                 use_chk_index):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        :param use_chk_index: Whether to setup and manage a CHK index.
        """
        # XXX: This should call self.reset()
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3,
            '.cix': 4}
        self.packs = []
        # name:Pack mapping
        self._names = None
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        flush = self._flush_new_pack
        self.revision_index = AggregateIndex(self.reload_pack_names, flush)
        self.inventory_index = AggregateIndex(self.reload_pack_names, flush)
        self.text_index = AggregateIndex(self.reload_pack_names, flush)
        self.signature_index = AggregateIndex(self.reload_pack_names, flush)
        all_indices = [self.revision_index, self.inventory_index,
                self.text_index, self.signature_index]
        if use_chk_index:
            self.chk_index = AggregateIndex(self.reload_pack_names, flush)
            all_indices.append(self.chk_index)
        else:
            # used to determine if we're using a chk_index elsewhere.
            self.chk_index = None
        # Tell all the CombinedGraphIndex objects about each other, so they can
        # share hints about which pack names to search first.
        all_combined = [agg_idx.combined_index for agg_idx in all_indices]
        for combined_idx in all_combined:
            combined_idx.set_sibling_indices(
                set(all_combined).difference([combined_idx]))
        self._resumed_packs = []
        self.config_stack = config.LocationStack(self.transport.base)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.repo)

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError(
                'pack %s already in _packs_by_name' % (pack.name,))
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)
        if self.chk_index is not None:
            self.chk_index.add_index(pack.chk_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result

    def autopack(self):
        """Pack the pack collection incrementally.

        This will not attempt global reorganisation or recompression,
        rather it will just ensure that the total number of packs does
        not grow without bound. It uses the _max_pack_count method to
        determine if autopacking is needed, and the pack_distribution
        method to determine the number of revisions in each pack.

        If autopacking takes place then the packs name collection will have
        been flushed to disk - packing requires updating the name collection
        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: Something evaluating true if packing took place.
        """
        while True:
            try:
                return self._do_autopack()
            except errors.RetryAutopack:
                # If we get a RetryAutopack exception, we should abort the
                # current action, and retry.
                pass
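        # (errors.RetryAutopack is raised via _restart_autopack, the
        # reload_func handed to the packer, once reload_pack_names() has
        # refreshed the pack list; looping here retries the autopack
        # against that refreshed list.)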

    def _do_autopack(self):
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return None
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []
        for pack in self.all_packs():
            revision_count = pack.get_revision_count()
            if revision_count == 0:
                # revision less packs are not generated by normal operation,
                # only by operations like sign-my-commits, and thus will not
                # tend to grow rapidly or without bound like commit containing
                # packs do - leave them alone as packing them really should
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        num_new_packs = len(pack_operations)
        num_old_packs = sum([len(po[1]) for po in pack_operations])
        num_revs_affected = sum([po[0] for po in pack_operations])
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions. Packing %d files into %d affecting %d'
            ' revisions', self, total_packs, total_revisions, num_old_packs,
            num_new_packs, num_revs_affected)
        result = self._execute_pack_operations(pack_operations,
            packer_class=self.normal_packer_class,
            reload_func=self._restart_autopack)
        mutter('Auto-packing repository %s completed', self)
        return result

    def _execute_pack_operations(self, pack_operations, packer_class,
            reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param packer_class: The class of packer to use
        :return: The new pack names.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = packer_class(self, packs, '.autopack',
                                  reload_func=reload_func)
            try:
                result = packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            if result is None:
                return
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        to_be_obsoleted = []
        for _, packs in pack_operations:
            to_be_obsoleted.extend(packs)
        result = self._save_pack_names(clear_obsolete_packs=True,
                                       obsolete_packs=to_be_obsoleted)
        return result

    def _flush_new_pack(self):
        if self._new_pack is not None:
            self._new_pack.flush()

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def _already_packed(self):
        """Is the collection already packed?"""
        return not (self.repo._format.pack_compresses or (len(self._names) > 1))

    def pack(self, hint=None, clean_obsolete_packs=False):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if self._already_packed():
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions with hint %r.', self, total_packs,
            total_revisions, hint)
        while True:
            try:
                self._try_pack_operations(hint)
            except RetryPackOperations:
                continue
            break

        if clean_obsolete_packs:
            self._clear_obsolete_packs()

    def _try_pack_operations(self, hint):
        """Calculate the pack operations based on the hint (if any), and
        execute them.
        """
        # determine which packs need changing
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            if hint is None or pack.name in hint:
                # Either no hint was provided (so we are packing everything),
                # or this pack was included in the hint.
                pack_operations[-1][0] += pack.get_revision_count()
                pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations,
            packer_class=self.optimising_packer_class,
            reload_func=self._restart_pack_operations)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """
        if len(existing_packs) <= len(pack_distribution):
            return []
        existing_packs.sort(reverse=True)
        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
                # this is already packed 'better' than this, so we can
                # not waste time packing it.
                while next_pack_rev_count > 0:
                    next_pack_rev_count -= pack_distribution[0]
                    if next_pack_rev_count >= 0:
                        # more to go
                        del pack_distribution[0]
                    else:
                        # didn't use that entire bucket up
                        pack_distribution[0] = -next_pack_rev_count
            else:
                # add the revisions we're going to add to the next output pack
                pack_operations[-1][0] += next_pack_rev_count
                # allocate this pack to the next pack sub operation
                pack_operations[-1][1].append(next_pack)
                if pack_operations[-1][0] >= pack_distribution[0]:
                    # this pack is used up, shift left.
                    del pack_distribution[0]
                    pack_operations.append([0, []])
        # Now that we know which pack files we want to move, shove them all
        # into a single pack file.
        final_rev_count = 0
        final_pack_list = []
        for num_revs, pack_files in pack_operations:
            final_rev_count += num_revs
            final_pack_list.extend(pack_files)
        if len(final_pack_list) == 1:
            raise AssertionError('We somehow generated an autopack with a'
                ' single pack file being moved.')
        return [[final_rev_count, final_pack_list]]
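    # In outline: packs are taken largest-first; any pack already at least
    # as large as the current distribution bucket is kept as-is (consuming
    # buckets), while smaller packs are accumulated into a combine
    # operation until the bucket is filled, at which point a new operation
    # is started.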

    def ensure_loaded(self):
        """Ensure we have read names from disk.

        :return: True if the disk names had not been previously read.
        """
        # NB: if you see an assertion error here, it's probably access against
        # an unlocked repo. Naughty.
        if not self.repo.is_locked():
            raise errors.ObjectNotLocked(self.repo)
        if self._names is None:
            self._names = {}
            self._packs_at_load = set()
            for index, key, value in self._iter_disk_pack_index():
                name = key[0]
                self._names[name] = self._parse_index_sizes(value)
                self._packs_at_load.add((key, value))
            result = True
        else:
            result = False
        # populate all the metadata.
        self.all_packs()
        return result

    def _parse_index_sizes(self, value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', is_chk=True)
            else:
                chk_index = None
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index, chk_index)
            self.add_pack_to_memory(result)
            return result

    def _resume_pack(self, name):
        """Get a suspended Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        if not re.match('[a-f0-9]{32}', name):
            # Tokens should be md5sums of the suspended pack file, i.e. 32 hex
            # digits.
            raise errors.UnresumableWriteGroup(
                self.repo, [name], 'Malformed write group token')
        try:
            rev_index = self._make_index(name, '.rix', resume=True)
            inv_index = self._make_index(name, '.iix', resume=True)
            txt_index = self._make_index(name, '.tix', resume=True)
            sig_index = self._make_index(name, '.six', resume=True)
            if self.chk_index is not None:
                chk_index = self._make_index(name, '.cix', resume=True,
                                             is_chk=True)
            else:
                chk_index = None
            result = self.resumed_pack_factory(name, rev_index, inv_index,
                txt_index, sig_index, self._upload_transport,
                self._pack_transport, self._index_transport, self,
                chk_index=chk_index)
        except errors.NoSuchFile as e:
            raise errors.UnresumableWriteGroup(self.repo, [name], str(e))
        self.add_pack_to_memory(result)
        self._resumed_packs.append(result)
        return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.

        :return: An iterator of the index contents.
        """
        return self._index_class(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix, resume=False, is_chk=False):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        if resume:
            transport = self._upload_transport
            index_size = transport.stat(index_name).st_size
        else:
            transport = self._index_transport
            index_size = self._names[name][size_offset]
        index = self._index_class(transport, index_name, index_size,
                                  unlimited_cache=is_chk)
        if is_chk and self._index_class is btree_index.BTreeGraphIndex:
            index._leaf_factory = btree_index._gcchk_factory
        return index

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result
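    # e.g. for 2315 revisions the cap is 2 + 3 + 1 + 5 = 11 packs, matching
    # the number of buckets pack_distribution() produces for the same count.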

    def names(self):
        """Provide an order to the underlying names."""
        return sorted(self._names.keys())

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        Note: for correctness this function should only be called after a new
        pack names index has been written without these pack names, and with
        the names of packs that contain the data previously available via these
        packs.

        :param packs: The packs to obsolete.
        :param return: None.
        """
        for pack in packs:
            try:
                try:
                    pack.pack_transport.move(pack.file_name(),
                        '../obsolete_packs/' + pack.file_name())
                except errors.NoSuchFile:
                    # perhaps obsolete_packs was removed? Let's create it and
                    # try again
                    try:
                        pack.pack_transport.mkdir('../obsolete_packs/')
                    except errors.FileExists:
                        pass
                    pack.pack_transport.move(pack.file_name(),
                        '../obsolete_packs/' + pack.file_name())
            except (errors.PathError, errors.TransportError) as e:
                # TODO: Should these be warnings or mutters?
                mutter("couldn't rename obsolete pack, skipping it:\n%s"
                       % (e,))
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            suffixes = ['.iix', '.six', '.tix', '.rix']
            if self.chk_index is not None:
                suffixes.append('.cix')
            for suffix in suffixes:
                try:
                    self._index_transport.move(pack.name + suffix,
                        '../obsolete_packs/' + pack.name + suffix)
                except (errors.PathError, errors.TransportError) as e:
                    mutter("couldn't rename obsolete index, skipping it:\n%s"
                           % (e,))

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if total_revisions == 0:
            return [0]
        digits = reversed(str(total_revisions))
        result = []
        for exponent, count in enumerate(digits):
            size = 10 ** exponent
            for pos in range(int(count)):
                result.append(size)
        return list(reversed(result))
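    # e.g. pack_distribution(2315) yields
    # [1000, 1000, 100, 100, 100, 10, 1, 1, 1, 1, 1]: one bucket of 10**n
    # revisions per unit of each decimal digit, largest first.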

    def _pack_tuple(self, name):
        """Return a tuple with the transport and file name for a pack name."""
        return self._pack_transport, name + '.pack'

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack, ignore_missing=False):
        """Remove the indices for pack from the aggregated indices.

        :param ignore_missing: Suppress KeyErrors from calling remove_index.
        """
        for index_type in Pack.index_definitions:
            attr_name = index_type + '_index'
            aggregate_index = getattr(self, attr_name)
            if aggregate_index is not None:
                pack_index = getattr(pack, attr_name)
                try:
                    aggregate_index.remove_index(pack_index)
                except KeyError:
                    if ignore_missing:
                        continue
                    raise

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.revision_index.clear()
        # cached signature data
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        # cached inventory data
        self.inventory_index.clear()
        # cached chk data
        if self.chk_index is not None:
            self.chk_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _diff_pack_names(self):
        """Read the pack names from disk, and compare it to the one in memory.

        :return: (disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes)
            disk_nodes    The final set of nodes that should be referenced
            deleted_nodes Nodes which have been removed from when we started
            new_nodes     Nodes that are newly introduced
        """
        # load the disk nodes across
        disk_nodes = set()
        for index, key, value in self._iter_disk_pack_index():
            disk_nodes.add((key, value))
        orig_disk_nodes = set(disk_nodes)

        # do a two-way diff against our original content
        current_nodes = set()
        for name, sizes in self._names.items():
            current_nodes.add(
                ((name, ), ' '.join(str(size) for size in sizes)))

        # Packs no longer present in the repository, which were present when we
        # locked the repository
        deleted_nodes = self._packs_at_load - current_nodes
        # Packs which this process is adding
        new_nodes = current_nodes - self._packs_at_load

        # Update the disk_nodes set to include the ones we are adding, and
        # remove the ones which were removed by someone else
        disk_nodes.difference_update(deleted_nodes)
        disk_nodes.update(new_nodes)

        return disk_nodes, deleted_nodes, new_nodes, orig_disk_nodes

    def _syncronize_pack_names_from_disk_nodes(self, disk_nodes):
        """Given the correct set of pack files, update our saved info.

        :return: (removed, added, modified)
            removed     pack names removed from self._names
            added       pack names added to self._names
            modified    pack names that had changed value
        """
        removed = []
        added = []
        modified = []
        ## self._packs_at_load = disk_nodes
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                removed.append(pack.name)
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never happen today
                    # because the three-way merge code above does not handle it
                    # - you may end up adding the same key twice to the new
                    # disk index because the set values are the same, unless
                    # the only index shows up as deleted by the set difference
                    # - which it may. Until there is a specific test for this,
                    # assume it's broken. RBC 20071017.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
                    modified.append(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
                added.append(name)
        return removed, added, modified

    def _save_pack_names(self, clear_obsolete_packs=False, obsolete_packs=None):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        :param obsolete_packs: Packs that are obsolete once the new pack-names
            file has been written.
        :return: A list of the names saved that were not previously on disk.
        """
        already_obsolete = []
        self.lock_names()
        try:
            builder = self._index_builder_class()
            (disk_nodes, deleted_nodes, new_nodes,
             orig_disk_nodes) = self._diff_pack_names()
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                to_preserve = set()
                if obsolete_packs:
                    to_preserve = {o.name for o in obsolete_packs}
                already_obsolete = self._clear_obsolete_packs(to_preserve)
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if obsolete_packs:
            # TODO: We could add one more condition here. "if o.name not in
            #       orig_disk_nodes and o != the new_pack we haven't written to
            #       disk yet. However, the new pack object is not easily
            #       accessible here (it would have to be passed through the
            #       autopacking code, etc.)
            obsolete_packs = [o for o in obsolete_packs
                              if o.name not in already_obsolete]
            self._obsolete_packs(obsolete_packs)
        return [new_node[0][0] for new_node in new_nodes]

    def reload_pack_names(self):
        """Sync our pack listing with what is present in the repository.

        This should be called when we find out that something we thought was
        present is now missing. This happens when another process re-packs the
        repository, etc.

        :return: True if the in-memory list of packs has been altered at all.
        """
        # The ensure_loaded call is to handle the case where the first call
        # made involving the collection was to reload_pack_names, where we
        # don't have a view of disk contents. It's a bit of a bandaid, and
        # causes two reads of pack-names, but it's a rare corner case not
        # struck with regular push/pull etc.
        first_read = self.ensure_loaded()
        if first_read:
            return True
        # out the new value.
        (disk_nodes, deleted_nodes, new_nodes,
         orig_disk_nodes) = self._diff_pack_names()
        # _packs_at_load is meant to be the explicit list of names in
        # 'pack-names' at the start. As such, it should not contain any
        # pending names that haven't been written out yet.
        self._packs_at_load = orig_disk_nodes
        (removed, added,
         modified) = self._syncronize_pack_names_from_disk_nodes(disk_nodes)
        if removed or added or modified:
            return True
        return False

    def _restart_autopack(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise errors.RetryAutopack(self.repo, False, sys.exc_info())

    def _restart_pack_operations(self):
        """Reload the pack names list, and restart the autopack code."""
        if not self.reload_pack_names():
            # Re-raise the original exception, because something went missing
            # and a restart didn't find it
            raise
        raise RetryPackOperations(self.repo, False, sys.exc_info())

    def _clear_obsolete_packs(self, preserve=None):
        """Delete everything from the obsolete-packs directory.

        :return: A list of pack identifiers (the filename without '.pack') that
            were found in obsolete_packs.
        """
        found = []
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        if preserve is None:
            preserve = set()
        try:
            obsolete_pack_files = obsolete_pack_transport.list_dir('.')
        except errors.NoSuchFile:
            return found
        for filename in obsolete_pack_files:
            name, ext = osutils.splitext(filename)
            if ext == '.pack':
                found.append(name)
            if name in preserve:
                continue
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError) as e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                        % (e,))
        return found

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = self.pack_factory(self, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self._new_pack.text_index.set_optimize(combine_backing_indices=False)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)
        if self.chk_index is not None:
            self.chk_index.add_writable_index(self._new_pack.chk_index,
                self._new_pack)
            self.repo.chk_bytes._index._add_callback = self.chk_index.add_callback
            self._new_pack.chk_index.set_optimize(combine_backing_indices=False)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback
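        # Net effect: while the write group is open, every node added to the
        # repository's versioned files is queued into the new pack's writable
        # indices via these add_callbacks, and the record bytes stream into
        # the pack file under upload/ until commit or abort.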

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            operation = cleanup.OperationWithCleanups(self._new_pack.abort)
            operation.add_cleanup(setattr, self, '_new_pack', None)
            # If we aborted while in the middle of finishing the write
            # group, _remove_pack_indices could fail because the indexes are
            # already gone.  But if they're not there we shouldn't fail in
            # this case, so we pass ignore_missing=True.
            operation.add_cleanup(self._remove_pack_indices, self._new_pack,
                ignore_missing=True)
            operation.run_simple()
        for resumed_pack in self._resumed_packs:
            operation = cleanup.OperationWithCleanups(resumed_pack.abort)
            # See comment in previous finally block.
            operation.add_cleanup(self._remove_pack_indices, resumed_pack,
                ignore_missing=True)
            operation.run_simple()
        del self._resumed_packs[:]

    def _remove_resumed_pack_indices(self):
        for resumed_pack in self._resumed_packs:
            self._remove_pack_indices(resumed_pack)
        del self._resumed_packs[:]

    def _check_new_inventories(self):
        """Detect missing inventories in this write group.

        :returns: list of strs, summarising any problems found.  If the list is
            empty no problems were found.
        """
        # The base implementation does no checks.  GCRepositoryPackCollection
        # overrides this.
        return []

    def _commit_write_group(self):
        all_missing = set()
        for prefix, versioned_file in (
                ('revisions', self.repo.revisions),
                ('inventories', self.repo.inventories),
                ('texts', self.repo.texts),
                ('signatures', self.repo.signatures),
                ):
            missing = versioned_file.get_missing_compression_parent_keys()
            all_missing.update([(prefix,) + key for key in missing])
        if all_missing:
            raise errors.BzrCheckError(
                "Repository %s has missing compression parent(s) %r "
                % (self.repo, sorted(all_missing)))
        problems = self._check_new_inventories()
        if problems:
            problems_summary = '\n'.join(problems)
            raise errors.BzrCheckError(
                "Cannot add revision(s) to repository: " + problems_summary)
        self._remove_pack_indices(self._new_pack)
        any_new_content = False
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            any_new_content = True
        else:
            self._new_pack.abort()
            self._new_pack = None
        for resumed_pack in self._resumed_packs:
            # XXX: this is a pretty ugly way to turn the resumed pack into a
            # properly committed pack.
            self._names[resumed_pack.name] = None
            self._remove_pack_from_memory(resumed_pack)
            resumed_pack.finish()
            self.allocate(resumed_pack)
            any_new_content = True
        del self._resumed_packs[:]
        if any_new_content:
            result = self.autopack()
            if not result:
                # when autopack takes no steps, the names list is still
                # unsaved.
                return self._save_pack_names()
            return result
        return []

    def _suspend_write_group(self):
        tokens = [pack.name for pack in self._resumed_packs]
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and read to use
            self._new_pack.finish(suspend=True)
            tokens.append(self._new_pack.name)
            self._new_pack = None
        else:
            self._new_pack.abort()
            self._new_pack = None
        self._remove_resumed_pack_indices()
        return tokens

    def _resume_write_group(self, tokens):
        for token in tokens:
            self._resume_pack(token)
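
    # A sketch of the suspend/resume round trip as driven from the
    # repository layer (see PackRepository below):
    #
    #   tokens = repo.suspend_write_group()  # pack names to resume from
    #   ...                                  # perhaps in another process
    #   repo.resume_write_group(tokens)
    #   repo.commit_write_group()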


class PackRepository(MetaDirVersionedFileRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph  |  HPSS  | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    VersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic meaning of the TextKeys.
    ===================================================
    """

    # These attributes are inherited from the Repository base class. Setting
    # them to None ensures that if the constructor is changed to not
    # initialize them, or a subclass fails to call the constructor, an error
    # will occur rather than the system working but generating incorrect
    # data.
    _commit_builder_class = None
    _serializer = None

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
                 _serializer):
        MetaDirRepository.__init__(self, _format, a_bzrdir, control_files)
        self._commit_builder_class = _commit_builder_class
        self._serializer = _serializer
        self._reconcile_fixes_text_parents = True
        if self._format.supports_external_lookups:
            self._unstacked_provider = graph.CachingParentsProvider(
                self._make_parents_provider_unstacked())
        else:
            self._unstacked_provider = graph.CachingParentsProvider(self)
        self._unstacked_provider.disable_cache()

    @needs_read_lock
    def _all_revision_ids(self):
        """See Repository.all_revision_ids()."""
        return [key[0] for key in self.revisions.keys()]

    def _abort_write_group(self):
        self.revisions._index._key_dependencies.clear()
        self._pack_collection._abort_write_group()

    def _make_parents_provider(self):
        if not self._format.supports_external_lookups:
            return self._unstacked_provider
        return graph.StackedParentsProvider(_LazyListJoin(
            [self._unstacked_provider], self._fallback_repositories))
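
    # The returned provider answers parent queries, consulting fallback
    # repositories only when stacking is supported; an illustrative use
    # (the revision id is hypothetical):
    #
    #   provider = repo._make_parents_provider()
    #   parent_map = provider.get_parent_map(['rev-id-1'])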

    def _refresh_data(self):
        if not self.is_locked():
            return
        self._pack_collection.reload_pack_names()
        self._unstacked_provider.disable_cache()
        self._unstacked_provider.enable_cache()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        hint = self._pack_collection._commit_write_group()
        self.revisions._index._key_dependencies.clear()
        # The commit may have added keys that were previously cached as
        # missing, so reset the cache.
        self._unstacked_provider.disable_cache()
        self._unstacked_provider.enable_cache()
        return hint

    def suspend_write_group(self):
        # XXX check self._write_group is self.get_transaction()?
        tokens = self._pack_collection._suspend_write_group()
        self.revisions._index._key_dependencies.clear()
        self._write_group = None
        return tokens

    def _resume_write_group(self, tokens):
        self._start_write_group()
        try:
            self._pack_collection._resume_write_group(tokens)
        except errors.UnresumableWriteGroup:
            self._abort_write_group()
            raise
        for pack in self._pack_collection._resumed_packs:
            self.revisions._index.scan_unvalidated_index(pack.revision_index)

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_locked(self):
        return self._write_lock_count or self.control_files.is_locked()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        """Lock the repository for writes.

        :return: A breezy.repository.RepositoryWriteLockResult.
        """
        locked = self.is_locked()
        if not self._write_lock_count and locked:
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'w':
                note('%r was write locked again', self)
            self._prev_lock = 'w'
            self._unstacked_provider.enable_cache()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return RepositoryWriteLockResult(self.unlock, None)
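
    # Write locks are reference counted, so lock/unlock pairs must balance;
    # a minimal usage sketch:
    #
    #   result = repo.lock_write()  # a RepositoryWriteLockResult
    #   try:
    #       pass  # mutate the repository
    #   finally:
    #       result.unlock()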

    def lock_read(self):
        """Lock the repository for reads.

        :return: A breezy.lock.LogicalLockResult.
        """
        locked = self.is_locked()
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        if not locked:
            if 'relock' in debug.debug_flags and self._prev_lock == 'r':
                note('%r was read locked again', self)
            self._prev_lock = 'r'
            self._unstacked_provider.enable_cache()
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()
        return LogicalLockResult(self.unlock)

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)

    def dont_leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.dont_leave_lock_in_place)

    @needs_write_lock
    def pack(self, hint=None, clean_obsolete_packs=False):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack(hint=hint, clean_obsolete_packs=clean_obsolete_packs)
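
    # 'bzr pack' ultimately arrives here; a sketch (the hint names are
    # hypothetical pack names):
    #
    #   repo.pack()                   # repack everything into one pack
    #   repo.pack(hint=['a1b2c3'])    # limit the work to the named packs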

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from breezy.reconcile import PackReconciler
        reconciler = PackReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        raise NotImplementedError(self._reconcile_pack)

    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
            self._unstacked_provider.disable_cache()
            self._transaction = None
            self._write_lock_count = 0
            raise errors.BzrError(
                'Must end write group before releasing write lock on %s'
                % self)
        if self._write_lock_count:
            self._write_lock_count -= 1
            if not self._write_lock_count:
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
        else:
            self.control_files.unlock()

        if not self.is_locked():
            self._unstacked_provider.disable_cache()
            for repo in self._fallback_repositories:
                repo.unlock()


class RepositoryFormatPack(MetaDirVersionedFileRepositoryFormat):
    """Format logic for pack structured repositories.

    This repository format has:
     - a list of packs in pack-names
     - packs in packs/NAME.pack
     - indices in indices/NAME.{iix,six,tix,rix}
     - knit deltas in the packs, knit indices mapped to the indices.
     - thunk objects to support the knits programming API.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
     - a LockDir lock
    """

    # Set this attribute in derived classes to control the repository class
    # created by open and initialize.
    repository_class = None
    # Set this attribute in derived classes to control the
    # _commit_builder_class that the repository objects will have passed to
    # their constructor.
    _commit_builder_class = None
    # Set this attribute in derived classes to control the _serializer that
    # the repository objects will have passed to their constructor.
    _serializer = None
    # Packs are not confused by ghosts.
    supports_ghosts = True
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False
    # Most pack formats do not use chk lookups.
    supports_chks = False
    # What index classes to use
    index_builder_class = None
    index_class = None
    _fetch_uses_deltas = True
    fast_deltas = False
    supports_funky_characters = True
    revision_graph_can_have_wrong_parents = True

    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        # GZ 2017-06-09: Where should format strings get decoded...
        utf8_files = [('format', self.get_format_string().encode('ascii'))]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repository = self.open(a_bzrdir=a_bzrdir, _found=True)
        self._run_post_repo_init_hooks(repository, a_bzrdir, shared)
        return repository

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
            repository at a slightly different url than normal. I.e. during
            'upgrade'.
        """
        if not _found:
            format = RepositoryFormatMetaDir.find_format(a_bzrdir)
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
                                                     'lock', lockdir.LockDir)
        return self.repository_class(_format=self,
                                     a_bzrdir=a_bzrdir,
                                     control_files=control_files,
                                     _commit_builder_class=self._commit_builder_class,
                                     _serializer=self._serializer)
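
    # Typical lifecycle for a concrete subclass of this format (sketch;
    # the subclass is assumed to have set repository_class and friends):
    #
    #   repo = format.initialize(a_bzrdir)  # create on disk and open
    #   repo = format.open(a_bzrdir)        # open an existing repository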


class RetryPackOperations(errors.RetryWithNewPacks):
    """Raised when we are packing and we find a missing file.

    Meant as a signaling exception, to tell the RepositoryPackCollection.pack
    code it should try again.
    """

    internal_error = True

    _fmt = ("Pack files have changed, reload and try pack again."
            " context: %(context)s %(orig_error)s")


class _DirectPackAccess(object):
    """Access to data in one or more packs with less translation."""

    def __init__(self, index_to_packs, reload_func=None, flush_func=None):
        """Create a _DirectPackAccess object.

        :param index_to_packs: A dict mapping index objects to the transport
            and file names for obtaining data.
        :param reload_func: A function to call if we determine that the pack
            files have moved and we need to reload our caches. See
            breezy.repo_fmt.pack_repo.AggregateIndex for more details.
        """
        self._container_writer = None
        self._write_index = None
        self._indices = index_to_packs
        self._reload_func = reload_func
        self._flush_func = flush_func

    def add_raw_records(self, key_sizes, raw_data):
        """Add raw knit bytes to a storage area.

        The data is spooled to the container writer in one bytes-record per
        raw data item.

        :param key_sizes: An iterable of tuples containing the key and size
            of each raw data segment.
        :param raw_data: A bytestring containing the data.
        :return: A list of memos to retrieve the record later. Each memo is an
            opaque index memo. For _DirectPackAccess the memo is (index, pos,
            length), where the index field is the write_index object supplied
            to the PackAccess object.
        """
        if not isinstance(raw_data, str):
            raise AssertionError(
                'data must be plain bytes was %s' % type(raw_data))
        result = []
        offset = 0
        for key, size in key_sizes:
            p_offset, p_length = self._container_writer.add_bytes_record(
                raw_data[offset:offset + size], [])
            offset += size
            result.append((self._write_index, p_offset, p_length))
        return result
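
    # The memos returned above can be handed straight back to
    # get_raw_records(); a sketch of the round trip (key and data are
    # hypothetical):
    #
    #   memos = access.add_raw_records([(key, len(data))], data)
    #   records = list(access.get_raw_records(memos))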

    def flush(self):
        """Flush pending writes on this access object.

        This will flush any buffered writes to a NewPack.
        """
        if self._flush_func is not None:
            self._flush_func()

    def get_raw_records(self, memos_for_retrieval):
        """Get the raw bytes for records.

        :param memos_for_retrieval: An iterable containing the (index, pos,
            length) memo for retrieving the bytes. The Pack access method
            looks up the pack to use for a given record in its index_to_pack
            map.
        :return: An iterator over the bytes of the records.
        """
        # first pass, group into same-index requests
        request_lists = []
        current_index = None
        for (index, offset, length) in memos_for_retrieval:
            if current_index == index:
                current_list.append((offset, length))
            else:
                if current_index is not None:
                    request_lists.append((current_index, current_list))
                current_index = index
                current_list = [(offset, length)]
        # handle the last entry
        if current_index is not None:
            request_lists.append((current_index, current_list))
        for index, offsets in request_lists:
            try:
                transport, path = self._indices[index]
            except KeyError:
                # A KeyError here indicates that someone has triggered an
                # index reload, and this index has gone missing, we need to
                # start over.
                if self._reload_func is None:
                    # If we don't have a _reload_func there is nothing that
                    # can be done here, so we fail.
                    raise
                raise errors.RetryWithNewPacks(index,
                                               reload_occurred=True,
                                               exc_info=sys.exc_info())
            try:
                reader = pack.make_readv_reader(transport, path, offsets)
                for names, read_func in reader.iter_records():
                    yield read_func(None)
            except errors.NoSuchFile:
                # A NoSuchFile error indicates that a pack file has gone
                # missing on disk, we need to trigger a reload, and start
                # over.
                if self._reload_func is None:
                    raise
                raise errors.RetryWithNewPacks(transport.abspath(path),
                                               reload_occurred=False,
                                               exc_info=sys.exc_info())

    def set_writer(self, writer, index, transport_packname):
        """Set a writer to use for adding data."""
        if index is not None:
            self._indices[index] = transport_packname
        self._container_writer = writer
        self._write_index = index

    def reload_or_raise(self, retry_exc):
        """Try calling the reload function, or re-raise the original exception.

        This should be called after _DirectPackAccess raises a
        RetryWithNewPacks exception. This function will handle the common
        logic of determining when the error is fatal versus being temporary.
        It will also make sure that the original exception is raised, rather
        than the RetryWithNewPacks exception.

        If this function returns, then the calling function should retry
        whatever operation was being performed. Otherwise an exception will
        be raised.

        :param retry_exc: A RetryWithNewPacks exception.
        """
        is_error = False
        if self._reload_func is None:
            is_error = True
        elif not self._reload_func():
            # The reload claimed that nothing changed
            if not retry_exc.reload_occurred:
                # If there wasn't an earlier reload, then we really were
                # expecting to find changes. We didn't find them, so this is
                # a hard error.
                is_error = True
        if is_error:
            # GZ 2017-03-27: No real reason this needs the original traceback.
            reraise(*retry_exc.exc_info)
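
    # Callers typically drive this from a retry loop; a minimal sketch,
    # where do_operation is a hypothetical callable reading via this
    # access object:
    #
    #   while True:
    #       try:
    #           return do_operation()
    #       except errors.RetryWithNewPacks as retry_exc:
    #           access.reload_or_raise(retry_exc)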