# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import time

from bzrlib import (
    debug,
    graph,
    osutils,
    pack,
    ui,
    )
from bzrlib.index import (
    CombinedGraphIndex,
    GraphIndexPrefixAdapter,
    )
from bzrlib.knit import (
    KnitPlainFactory,
    KnitVersionedFiles,
    _KnitGraphIndex,
    _DirectPackAccess,
    )
from bzrlib import tsort
""")
from bzrlib import errors

from bzrlib.decorators import needs_write_lock
from bzrlib.btree_index import (
    BTreeGraphIndex,
    BTreeBuilder,
    )
from bzrlib.index import (
    GraphIndex,
    InMemoryGraphIndex,
    )
from bzrlib.repofmt.knitrepo import KnitRepository
from bzrlib.repository import (
    CommitBuilder,
    MetaDirRepositoryFormat,
    RootCommitBuilder,
    )
import bzrlib.revision as _mod_revision
from bzrlib.trace import (
    mutter,
    warning,
    )


class PackCommitBuilder(CommitBuilder):
    """A subclass of CommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])


class PackRootCommitBuilder(RootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])
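
    # Illustrative sketch (not part of bzrlib): _heads maps revision ids into
    # the (file_id, revision_id) keyspace of the text index before asking the
    # file graph for heads.  For a file 'f-id' modified in 'r1' and 'r2':
    #   keys = [('f-id', 'r1'), ('f-id', 'r2')]
    #   self._file_graph.heads(keys) -> e.g. set([('f-id', 'r2')])
    # and the method returns just the revision ids, e.g. set(['r2']).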


class Pack(object):
    """An in memory proxy for a pack and its indices.

    This is a base class that is not directly used, instead the classes
    ExistingPack and NewPack are used.
    """

    def __init__(self, revision_index, inventory_index, text_index,
        signature_index):
        """Create a pack instance.

        :param revision_index: A GraphIndex for determining what revisions are
            present in the Pack and accessing the locations of their texts.
        :param inventory_index: A GraphIndex for determining what inventories are
            present in the Pack and accessing the locations of their
            texts/deltas.
        :param text_index: A GraphIndex for determining what file texts
            are present in the pack and accessing the locations of their
            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def get_revision_count(self):
        return self.revision_index.key_count()

    def inventory_index_name(self, name):
        """The inv index is the name + .iix."""
        return self.index_name('inventory', name)

    def revision_index_name(self, name):
        """The revision index is the name + .rix."""
        return self.index_name('revision', name)

    def signature_index_name(self, name):
        """The signature index is the name + .six."""
        return self.index_name('signature', name)

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)
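
    # Illustrative example (not in bzrlib): each index lives beside
    # '<name>.pack' under a four-suffix naming scheme:
    #   pack.revision_index_name('a1b2')   # -> 'a1b2.rix'
    #   pack.inventory_index_name('a1b2')  # -> 'a1b2.iix'
    #   pack.text_index_name('a1b2')       # -> 'a1b2.tix'
    #   pack.signature_index_name('a1b2')  # -> 'a1b2.six'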

    def _external_compression_parents_of_texts(self):
        keys = set()
        refs = set()
        for node in self.text_index.iter_all_entries():
            keys.add(node[1])
            refs.update(node[3][1])
        return refs - keys


class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<bzrlib.repofmt.pack_repo.Pack object at 0x%x, %s, %s>" % (
            id(self), self.pack_transport, self.name)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }

    def __init__(self, upload_transport, index_transport, pack_transport,
        upload_suffix='', file_mode=None, index_builder_class=None,
        index_class=None):
        """Create a NewPack instance.

        :param upload_transport: A writable transport for the pack to be
            incrementally uploaded to.
        :param index_transport: A writable transport for the pack's indices to
            be written to when the pack is finished.
        :param pack_transport: A writable transport for the pack to be renamed
            to when the upload is complete. This *must* be the same as
            upload_transport.clone('../packs').
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        :param index_builder_class: Required keyword parameter - the class of
            index builder to use.
        :param index_class: Required keyword parameter - the class of index
            to use.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            index_builder_class(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # listed.
            index_builder_class(reference_lists=0),
            )
        # When we make readonly indices, we need this.
        self.index_class = index_class
        # where should the new pack be opened
        self.upload_transport = upload_transport
        # where are indices written out to
        self.index_transport = index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them.  Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # buffer cap
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
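        # Illustrative note (not in bzrlib): with the default cache limit of
        # 0 every _write_data call writes through immediately; after e.g.
        # self.set_write_cache_size(1024*1024) byte strings accumulate in
        # _buffer until a megabyte is pending (or flush=True is passed), and
        # are then written as one string while the running md5 is updated.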
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'

    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count())

    def finish(self):
        """Finish the new pack.

        This:
         - finalises the content
         - assigns a name (the md5 of the content, currently)
         - writes out the associated indices
         - renames the pack into place.
         - stores the index size tuple for the pack in the index_sizes
           attribute.
        """
        # flush any outstanding data
        self._write_data('', flush=True)
        self.name = self._hash.hexdigest()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller.  On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision')
        self._write_index('inventory', self.inventory_index, 'inventory')
        self._write_index('text', self.text_index, 'file texts')
        self._write_index('signature', self.signature_index,
            'revision signatures')
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        self.upload_transport.rename(self.random_name,
                '../packs/' + self.name + '.pack')
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack renamed into place: %s%s->%s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                self.pack_transport, self.name,
                time.time() - self.start_time)

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + NewPack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in an index_size array for a given index type."""
        return NewPack.index_definitions[index_type][1]
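
    # Illustrative example (not in bzrlib): both helpers consult
    # index_definitions, so for a pack whose md5 name is '0123abcd':
    #   self.index_name('text', '0123abcd')  # -> '0123abcd.tix'
    #   self.index_offset('text')            # -> 2, the slot of the text
    #                                        #    index in self.index_sizes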

    def _replace_index_with_readonly(self, index_type):
        setattr(self, index_type + '_index',
            self.index_class(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)]))

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        self.index_sizes[self.index_offset(index_type)] = \
            self.index_transport.put_file(index_name, index.finish(),
            mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    Index-To-Pack mapping, and all indices list for a specific type of index
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices.  The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self):
        """Create an AggregateIndex."""
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([])
        self.data_access = _DirectPackAccess(self.index_to_pack)
        self.add_callback = None

    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        This should probably not be used eventually, rather incremental add and
        removal of indices. It has been added during refactoring of existing
        code.

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        # any.
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index)
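
    # Illustrative sketch (not part of bzrlib): insert_index(0, ...) means
    # lookups consult the newest pack first.  Assuming two Pack objects:
    #   agg = AggregateIndex()
    #   agg.add_index(old_pack.revision_index, old_pack)
    #   agg.add_index(new_pack.revision_index, new_pack)
    # agg.combined_index now searches new_pack's index before old_pack's.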

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time.  Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" % \
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        self.add_callback = None

    def remove_index(self, index, pack):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        del self.index_to_pack[index]
        self.combined_index._indices.remove(index)
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))


class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        # The index layer keys for the revisions being copied. None for 'all
        # revisions'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None
        self._extra_init()

    def _extra_init(self):
        """A template hook to allow extending the constructor trivially."""

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to create_pack_from_packs while '
                'another pack is being written.')
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        self.pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._create_pack_from_packs()
        finally:
            self.pb.finished()

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        return NewPack(self._pack_collection._upload_transport,
            self._pack_collection._index_transport,
            self._pack_collection._pack_transport, upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode(),
            index_builder_class=self._pack_collection._index_builder_class,
            index_class=self._pack_collection._index_class)

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        # select revisions
        if self.revision_ids:
            revision_keys = [(revision_id,) for revision_id in self.revision_ids]
        else:
            revision_keys = None
        # select revision keys
        revision_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'revision_index')[0]
        revision_nodes = self._pack_collection._index_contents(revision_index_map, revision_keys)
        # copy revision keys and adjust values
        self.pb.update("Copying revision texts", 1)
        total_items, readv_group_iter = self._revision_node_readv(revision_nodes)
        list(self._copy_nodes_graph(revision_index_map, self.new_pack._writer,
            self.new_pack.revision_index, readv_group_iter, total_items))
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revisions copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.revision_index.key_count(),
                time.time() - self.new_pack.start_time)
        self._revision_keys = revision_keys

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        # select inventory keys
        inv_keys = self._revision_keys # currently the same keyspace, and note that
        # querying for keys here could introduce a bug where an inventory item
        # is missed, so do not change it to query separately without cross
        # checking like the text key check below.
        inventory_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'inventory_index')[0]
        inv_nodes = self._pack_collection._index_contents(inventory_index_map, inv_keys)
        # copy inventory keys and adjust values
        # XXX: Should be a helper function to allow different inv representation
        # at this point.
        self.pb.update("Copying inventory texts", 2)
        total_items, readv_group_iter = self._least_readv_node_readv(inv_nodes)
        # Only grab the output lines if we will be processing them
        output_lines = bool(self.revision_ids)
        inv_lines = self._copy_nodes_graph(inventory_index_map,
            self.new_pack._writer, self.new_pack.inventory_index,
            readv_group_iter, total_items, output_lines=output_lines)
        if self.revision_ids:
            self._process_inventory_lines(inv_lines)
        else:
            # eat the iterator to cause it to execute.
            list(inv_lines)
            self._text_filter = None
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: inventories copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.inventory_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _copy_text_texts(self):
        # select text keys
        text_index_map, text_nodes = self._get_text_nodes()
        if self._text_filter is not None:
            # We could return the keys copied as part of the return value from
            # _copy_nodes_graph but this doesn't work all that well with the
            # need to get line output too, so we check separately, and as we're
            # going to buffer everything anyway, we check beforehand, which
            # saves reading knit data over the wire when we know there are
            # missing records.
            text_nodes = set(text_nodes)
            present_text_keys = set(_node[1] for _node in text_nodes)
            missing_text_keys = set(self._text_filter) - present_text_keys
            if missing_text_keys:
                # TODO: raise a specific error that can handle many missing
                # keys.
                a_missing_key = missing_text_keys.pop()
                raise errors.RevisionNotPresent(a_missing_key[1],
                    a_missing_key[0])
        # copy text keys and adjust values
        self.pb.update("Copying content texts", 3)
        total_items, readv_group_iter = self._least_readv_node_readv(text_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        self._log_copied_texts()

    def _check_references(self):
        """Make sure our external references are present."""
        external_refs = self.new_pack._external_compression_parents_of_texts()
        if external_refs:
            index = self._pack_collection.text_index.combined_index
            found_items = list(index.iter_entries(external_refs))
            if len(found_items) != len(external_refs):
                found_keys = set(k for idx, k, refs, value in found_items)
                missing_items = external_refs - found_keys
                missing_file_id, missing_revision_id = missing_items.pop()
                raise errors.RevisionNotPresent(missing_revision_id,
                    missing_file_id)

    def _create_pack_from_packs(self):
        self.pb.update("Opening pack", 0, 5)
        self.new_pack = self.open_pack()
        new_pack = self.new_pack
        # buffer data - we won't be reading-back during the pack creation and
        # this makes a significant difference on sftp pushes.
        new_pack.set_write_cache_size(1024*1024)
        if 'pack' in debug.debug_flags:
            plain_pack_list = ['%s%s' % (a_pack.pack_transport.base, a_pack.name)
                for a_pack in self.packs]
            if self.revision_ids is not None:
                rev_count = len(self.revision_ids)
            else:
                rev_count = 'all'
            mutter('%s: create_pack: creating pack from source packs: '
                '%s%s %s revisions wanted %s t=0',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                plain_pack_list, rev_count)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_text_texts()
        # select signature keys
        signature_filter = self._revision_keys # same keyspace
        signature_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'signature_index')[0]
        signature_nodes = self._pack_collection._index_contents(signature_index_map,
            signature_filter)
        # copy signature keys and adjust values
        self.pb.update("Copying signature texts", 4)
        self._copy_nodes(signature_nodes, signature_index_map, new_pack._writer,
            new_pack.signature_index)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                new_pack.signature_index.key_count(),
                time.time() - new_pack.start_time)
        self._check_references()
        if not self._use_pack(new_pack):
            new_pack.abort()
            return None
        self.pb.update("Finishing pack", 5)
        new_pack.finish()
        self._pack_collection.allocate(new_pack)
        return new_pack

    def _copy_nodes(self, nodes, index_map, writer, write_index):
        """Copy knit nodes between packs with no graph references."""
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb)
        finally:
            pb.finished()

    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:
        # group by pack
        nodes = sorted(nodes)
        # how to map this into knit.py - or knit.py into this?
        # we don't want the typical knit logic, we want grouping by pack
        # at this point - perhaps a helper library for the following code
        # duplication points?
        request_groups = {}
        for index, key, value in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value))
        record_index = 0
        pb.update("Copied record", record_index, len(nodes))
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append((offset, length, (key, value[0])))
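                # Illustrative note (not in bzrlib): the index value is a
                # flag byte followed by 'offset length' within the pack,
                # e.g. value = 'N54321 1234' parses to offset 54321 and
                # length 1234, with value[0] ('N') carried over unchanged
                # into the new index entry below.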
            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            transport, path = index_map[index]
            reader = pack.make_readv_reader(transport, path,
                [offset[0:2] for offset in pack_readv_requests])
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                df, _ = knit._parse_record_header(key, raw_data)
                df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1

    def _copy_nodes_graph(self, index_map, writer, write_index,
        readv_group_iter, total_items, output_lines=False):
        """Copy knit nodes between packs.

        :param output_lines: Return lines present in the copied data as
            an iterator of line,version_id.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for result in self._do_copy_nodes_graph(index_map, writer,
                write_index, output_lines, pb, readv_group_iter, total_items):
                yield result
        except Exception:
            # Python 2.4 does not permit try:finally: in a generator.
            pb.finished()
            raise
        else:
            pb.finished()

    def _do_copy_nodes_graph(self, index_map, writer, write_index,
        output_lines, pb, readv_group_iter, total_items):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # for line extraction when requested (inventories only)
        if output_lines:
            factory = KnitPlainFactory()
        record_index = 0
        pb.update("Copied record", record_index, total_items)
        for index, readv_vector, node_vector in readv_group_iter:
            # copy the data
            transport, path = index_map[index]
            reader = pack.make_readv_reader(transport, path, readv_vector)
            for (names, read_func), (key, eol_flag, references) in \
                izip(reader.iter_records(), node_vector):
                raw_data = read_func(None)
                if output_lines:
                    # read the entire thing
                    content, _ = knit._parse_record(key[-1], raw_data)
                    if len(references[-1]) == 0:
                        line_iterator = factory.get_fulltext_content(content)
                    else:
                        line_iterator = factory.get_linedelta_content(content)
                    for line in line_iterator:
                        yield line, key
                else:
                    # check the header only
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size), references)
                pb.update("Copied record", record_index)
                record_index += 1

    def _get_text_nodes(self):
        text_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'text_index')[0]
        return text_index_map, self._pack_collection._index_contents(text_index_map,
            self._text_filter)

    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to perform
            readvs to obtain the data for nodes. Each item yielded by the
            iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack
        nodes = sorted(nodes)
        total = len(nodes)
        request_groups = {}
        for index, key, value, references in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value, references))
        result = []
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value, references in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append(
                    ((offset, length), (key, value[0], references)))
            # linear scan up the pack to maximum range combining.
            pack_readv_requests.sort()
            # split out the readv and the node data.
            pack_readv = [readv for readv, node in pack_readv_requests]
            node_vector = [node for readv, node in pack_readv_requests]
            result.append((index, pack_readv, node_vector))
        return total, result
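
    # Illustrative example (not in bzrlib): for two revisions stored in one
    # source pack the method returns (2, [(index, [(0, 100), (100, 50)],
    # [(('rev-1',), flag1, refs1), (('rev-2',), flag2, refs2)])]) - one
    # readv vector plus the matching node metadata per source pack.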

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _process_inventory_lines(self, inv_lines):
        """Use up the inv_lines generator and set up a text key filter."""
        repo = self._pack_collection.repo
        fileid_revisions = repo._find_file_ids_from_xml_inventory_lines(
            inv_lines, self.revision_keys)
        text_filter = []
        for fileid, file_revids in fileid_revisions.iteritems():
            text_filter.extend([(fileid, file_revid) for file_revid in file_revids])
        self._text_filter = text_filter

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        return self._least_readv_node_readv(revision_nodes)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class OptimisingPacker(Packer):
    """A packer which spends more time to create better disk layouts."""

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        This sort places revisions in topological order with the ancestors
        after the children.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        # build an ancestors dict
        ancestors = {}
        by_key = {}
        for index, key, value, references in revision_nodes:
            ancestors[key] = references[0]
            by_key[key] = (index, value, references)
        order = tsort.topo_sort(ancestors)
        total = len(order)
        # Single IO is pathological, but it will work as a starting point.
        requests = []
        for key in reversed(order):
            index, value, references = by_key[key]
            # ---- KnitGraphIndex.get_position
            bits = value[1:].split(' ')
            offset, length = int(bits[0]), int(bits[1])
            requests.append(
                (index, [(offset, length)], [(key, value[0], references)]))
        # TODO: combine requests in the same index that are in ascending order.
        return total, requests
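
    # Illustrative note (not in bzrlib): topo_sort emits parents before
    # children, so reversed(order) writes each child before its ancestors,
    # and every record becomes its own single-range request, shaped like
    #   (index, [(offset, length)], [(key, value[0], references)])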

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        new_pack = super(OptimisingPacker, self).open_pack()
        # Turn on the optimization flags for all the index builders.
        new_pack.revision_index.set_optimize(for_size=True)
        new_pack.inventory_index.set_optimize(for_size=True)
        new_pack.text_index.set_optimize(for_size=True)
        new_pack.signature_index.set_optimize(for_size=True)
        return new_pack


class ReconcilePacker(Packer):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    correct.
    """

    def _extra_init(self):
        self._data_changed = False

    def _process_inventory_lines(self, inv_lines):
        """Generate a text key reference map for reconciling with."""
        repo = self._pack_collection.repo
        refs = repo._find_text_key_references_from_xml_inventory_lines(
            inv_lines)
        self._text_refs = refs
        # during reconcile we:
        #  - convert unreferenced texts to full texts
        #  - correct texts which reference a text not copied to be full texts
        #  - copy all others as-is but with corrected parents.
        #  - so at this point we don't know enough to decide what becomes a full
        #    text.
        self._text_filter = None

    def _copy_text_texts(self):
        """Generate what texts we should have and then copy."""
        self.pb.update("Copying content texts", 3)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for
            _1, key, _2, refs in
            self.new_pack.revision_index.iter_all_entries()])
        ideal_index = repo._generate_text_key_index(self._text_refs, ancestors)
        # 2) generate a text_nodes list that contains all the deltas that can
        # be used as-is, with corrected parents.
        ok_nodes = []
        bad_texts = []
        discarded_nodes = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        text_index_map, text_nodes = self._get_text_nodes()
        for node in text_nodes:
            try:
                ideal_parents = tuple(ideal_index[node[1]])
            except KeyError:
                discarded_nodes.append(node)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                if ideal_parents == node[3][0]:
                    # no change needed.
                    ok_nodes.append(node)
                elif ideal_parents[0:1] == node[3][0][0:1]:
                    # the left most parent is the same, or there are no parents
                    # today. Either way, we can preserve the representation as
                    # long as we change the refs to be inserted.
                    self._data_changed = True
                    ok_nodes.append((node[0], node[1], node[2],
                        (ideal_parents, node[3][1])))
                    self._data_changed = True
                else:
                    # Reinsert this text completely
                    bad_texts.append((node[1], ideal_parents))
                    self._data_changed = True
        # we're finished with some data.
        del ideal_index
        del text_nodes
        # 3) bulk copy the ok data
        total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        # 4) ad hoc copy all the other texts.
        # We have to topologically insert all texts otherwise we can fail to
        # reconcile when parts of a single delta chain are preserved intact,
        # and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
        # reinserted, and if d3 has incorrect parents it will also be
        # reinserted. If we insert d3 first, d2 is present (as it was bulk
        # copied), so we will try to delta, but d2 is not currently able to be
        # extracted because its basis d1 is not present. Topologically sorting
        # addresses this. The following generates a sort for all the texts that
        # are being inserted without having to reference the entire text key
        # space (we only topo sort the revisions, which is smaller).
        topo_order = tsort.topo_sort(ancestors)
        rev_order = dict(zip(topo_order, range(len(topo_order))))
        bad_texts.sort(key=lambda key: rev_order[key[0][1]])
        transaction = repo.get_transaction()
        file_id_index = GraphIndexPrefixAdapter(
            self.new_pack.text_index,
            ('blank', ), 1,
            add_nodes_callback=self.new_pack.text_index.add_nodes)
        data_access = _DirectPackAccess(
            {self.new_pack.text_index:self.new_pack.access_tuple()})
        data_access.set_writer(self.new_pack._writer, self.new_pack.text_index,
            self.new_pack.access_tuple())
        output_texts = KnitVersionedFiles(
            _KnitGraphIndex(self.new_pack.text_index,
                add_callback=self.new_pack.text_index.add_nodes,
                deltas=True, parents=True, is_locked=repo.is_locked),
            data_access=data_access, max_delta_chain=200)
        for key, parent_keys in bad_texts:
            # We refer to the new pack to delta data being output.
            # A possible improvement would be to catch errors on short reads
            # and only flush then.
            self.new_pack.flush()
            parents = []
            for parent_key in parent_keys:
                if parent_key[0] != key[0]:
                    # Graph parents must match the fileid
                    raise errors.BzrError('Mismatched key parent %r:%r' %
                        (key, parent_keys))
                parents.append(parent_key[1])
            text_lines = osutils.split_lines(repo.texts.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext'))
            output_texts.add_lines(key, parent_keys, text_lines,
                random_id=True, check_content=False)
        # 5) check that nothing inserted has a reference outside the keyspace.
        missing_text_keys = self.new_pack._external_compression_parents_of_texts()
        if missing_text_keys:
            raise errors.BzrError('Reference to missing compression parents %r'
                % (missing_text_keys,))
        self._log_copied_texts()

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        # XXX: we might be better checking this at the copy time.
        original_inventory_keys = set()
        inv_index = self._pack_collection.inventory_index.combined_index
        for entry in inv_index.iter_all_entries():
            original_inventory_keys.add(entry[1])
        new_inventory_keys = set()
        for entry in new_pack.inventory_index.iter_all_entries():
            new_inventory_keys.add(entry[1])
        if new_inventory_keys != original_inventory_keys:
            self._data_changed = True
        return new_pack.data_inserted() and self._data_changed


class RepositoryPackCollection(object):
    """Management of packs within a repository.

    :ivar _names: map of {pack_name: (index_size,)}
    """

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport, index_builder_class, index_class):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        :param index_builder_class: The index builder class to use.
        :param index_class: The index class to use.
        """
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._index_builder_class = index_builder_class
        self._index_class = index_class
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3}
        self.packs = []
        # name:Pack mapping
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        self.revision_index = AggregateIndex()
        self.inventory_index = AggregateIndex()
        self.text_index = AggregateIndex()
        self.signature_index = AggregateIndex()

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError()
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result

    def autopack(self):
        """Pack the pack collection incrementally.

        This will not attempt global reorganisation or recompression,
        rather it will just ensure that the total number of packs does
        not grow without bound. It uses the _max_pack_count method to
        determine if autopacking is needed, and the pack_distribution
        method to determine the number of revisions in each pack.

        If autopacking takes place then the packs name collection will have
        been flushed to disk - packing requires updating the name collection
        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: True if packing took place.
        """
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return False
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions into %d packs.', self, total_packs,
            total_revisions, self._max_pack_count(total_revisions))
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []
        for pack in self.all_packs():
            revision_count = pack.get_revision_count()
            if revision_count == 0:
                # revision less packs are not generated by normal operation,
                # only by operations like sign-my-commits, and thus will not
                # tend to grow rapidly or without bound like commit containing
                # packs do - leave them alone as packing them really should
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        self._execute_pack_operations(pack_operations)
        return True

    def _execute_pack_operations(self, pack_operations, _packer_class=Packer):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            _packer_class(self, packs, '.autopack').pack()
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs.
        self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def pack(self):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if total_packs < 2:
            # This is arguably wrong because we might not be optimal, but for
            # now lets leave it in. (e.g. reconcile -> one pack. But not
            # pessimistic.
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions into 1 pack.', self, total_packs,
            total_revisions)
        # determine which packs need changing
        pack_distribution = [1]
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            pack_operations[-1][0] += pack.get_revision_count()
            pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """
        if len(existing_packs) <= len(pack_distribution):
            return []
        existing_packs.sort(reverse=True)
        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
                # this is already packed 'better' than this, so we can
                # not waste time packing it.
                while next_pack_rev_count > 0:
                    next_pack_rev_count -= pack_distribution[0]
                    if next_pack_rev_count >= 0:
                        del pack_distribution[0]
                    else:
                        # didn't use that entire bucket up
                        pack_distribution[0] = -next_pack_rev_count
            else:
                # add the revisions we're going to add to the next output pack
                pack_operations[-1][0] += next_pack_rev_count
                # allocate this pack to the next pack sub operation
                pack_operations[-1][1].append(next_pack)
                if pack_operations[-1][0] >= pack_distribution[0]:
                    # this pack is used up, shift left.
                    del pack_distribution[0]
                    pack_operations.append([0, []])
        # Now that we know which pack files we want to move, shove them all
        # into a single pack file.
        final_rev_count = 0
        final_pack_list = []
        for num_revs, pack_files in pack_operations:
            final_rev_count += num_revs
            final_pack_list.extend(pack_files)
        if len(final_pack_list) == 1:
            raise AssertionError('We somehow generated an autopack with a'
                ' single pack file being moved.')
        return [[final_rev_count, final_pack_list]]
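
    # Illustrative example (not in bzrlib): with pack_distribution(11) ->
    # [10, 1] and three single-revision packs, all three packs fall into
    # the 10-revision bucket and one operation is returned, shaped like
    # [[3, [pack_a, pack_b, pack_c]]] - combine the three into one pack.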

    def ensure_loaded(self):
        # NB: if you see an assertion error here, it's probably access against
        # an unlocked repo. Naughty.
        if not self.repo.is_locked():
            raise errors.ObjectNotLocked(self.repo)
        if self._names is None:
            self._names = {}
            self._packs_at_load = set()
            for index, key, value in self._iter_disk_pack_index():
                name = key[0]
                self._names[name] = self._parse_index_sizes(value)
                self._packs_at_load.add((key, value))
        # populate all the metadata.
        self.all_packs()

    def _parse_index_sizes(self, value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])
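
    # Illustrative example (not in bzrlib): the value is the four index
    # lengths in .rix/.iix/.tix/.six order, space separated:
    #   self._parse_index_sizes('1234 5678 9012 345')
    #   # -> (1234, 5678, 9012, 345)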

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index)
            self.add_pack_to_memory(result)
            return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.
        :return: An iterator of the index contents.
        """
        return self._index_class(self.transport, 'pack-names', None
            ).iter_all_entries()

    def _make_index(self, name, suffix):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        index_size = self._names[name][size_offset]
        return self._index_class(
            self._index_transport, index_name, index_size)

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result
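
    # Illustrative example (not in bzrlib): the cap is the digit sum of the
    # revision count, so 2432 revisions allow at most 2+4+3+2 = 11 packs -
    # one pack per element of pack_distribution(2432) below.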

    def names(self):
        """Provide an order to the underlying names."""
        return sorted(self._names.keys())

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        Note: for correctness this function should only be called after a new
        pack names index has been written without these pack names, and with
        the names of packs that contain the data previously available via these
        packs.

        :param packs: The packs to obsolete.
        :return: None.
        """
        for pack in packs:
            pack.pack_transport.rename(pack.file_name(),
                '../obsolete_packs/' + pack.file_name())
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            for suffix in ('.iix', '.six', '.tix', '.rix'):
                self._index_transport.rename(pack.name + suffix,
                    '../obsolete_packs/' + pack.name + suffix)

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if total_revisions == 0:
            return [0]
        digits = reversed(str(total_revisions))
        result = []
        for exponent, count in enumerate(digits):
            size = 10 ** exponent
            for pos in range(int(count)):
                result.append(size)
        return list(reversed(result))
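
    # Illustrative example (not in bzrlib): revisions are bucketed by
    # decimal digit, largest buckets first:
    #   self.pack_distribution(2432)
    #   # -> [1000, 1000, 100, 100, 100, 100, 10, 10, 10, 1, 1]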

    def _pack_tuple(self, name):
        """Return a tuple with the transport and file name for a pack name."""
        return self._pack_transport, name + '.pack'

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)
        self.packs.remove(pack)

    def _remove_pack_indices(self, pack):
        """Remove the indices for pack from the aggregated indices."""
        self.revision_index.remove_index(pack.revision_index, pack)
        self.inventory_index.remove_index(pack.inventory_index, pack)
        self.text_index.remove_index(pack.text_index, pack)
        self.signature_index.remove_index(pack.signature_index, pack)

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.repo._revision_knit = None
        self.revision_index.clear()
        # cached signature data
        self.repo._signature_knit = None
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        self.repo._text_knit = None
        # cached inventory data
        self.inventory_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _make_index_map(self, index_suffix):
        """Return information on existing indices.

        :param index_suffix: Index suffix added to pack name.

        :returns: (pack_map, indices) where indices is a list of GraphIndex
            objects, and pack_map is a mapping from those objects to the
            pack tuple they describe.
        """
        # TODO: stop using this; it creates new indices unnecessarily.
        self.ensure_loaded()
        suffix_map = {'.rix': 'revision_index',
            '.six': 'signature_index',
            '.iix': 'inventory_index',
            '.tix': 'text_index',
            }
        return self._packs_list_to_pack_map_and_index_list(self.all_packs(),
            suffix_map[index_suffix])

    def _packs_list_to_pack_map_and_index_list(self, packs, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param packs: The packs list to process.
        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the same order
            as the packs list.
        """
        indices = []
        pack_map = {}
        for pack in packs:
            index = getattr(pack, index_attribute)
            indices.append(index)
            pack_map[index] = (pack.pack_transport, pack.file_name())
        return pack_map, indices

    def _index_contents(self, pack_map, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param pack_map: A map from indices to pack details.
        :param key_filter: An optional filter to limit the
            keys returned.
        """
        indices = [index for index in pack_map.iterkeys()]
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _save_pack_names(self, clear_obsolete_packs=False):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        """
        self.lock_names()
        try:
            builder = self._index_builder_class()
            # load the disk nodes across
            disk_nodes = set()
            for index, key, value in self._iter_disk_pack_index():
                disk_nodes.add((key, value))
            # do a two-way diff against our original content
            current_nodes = set()
            for name, sizes in self._names.iteritems():
                current_nodes.add(
                    ((name, ), ' '.join(str(size) for size in sizes)))
            deleted_nodes = self._packs_at_load - current_nodes
            new_nodes = current_nodes - self._packs_at_load
            disk_nodes.difference_update(deleted_nodes)
            disk_nodes.update(new_nodes)
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            # move the baseline forward
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                self._clear_obsolete_packs()
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never happen today
                    # because the three-way merge code above does not handle it
                    # - you may end up adding the same key twice to the new
                    # disk index because the set values are the same, unless
                    # the only index shows up as deleted by the set difference
                    # - which it may. Until there is a specific test for this,
                    # assume it's broken. RBC 20071017.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)
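
    # Illustrative sketch (editorial) of the merge arithmetic in
    # _save_pack_names, using invented node values:
    #
    #   packs_at_load = set([(('a',), '1 2 3 4')])
    #   current_nodes = set([(('b',), '5 6 7 8')])          # 'a' was deleted
    #   disk_nodes = set([(('a',), '1 2 3 4'), (('c',), '9 9 9 9')])
    #   disk_nodes -= packs_at_load - current_nodes         # drop our deletion
    #   disk_nodes |= current_nodes - packs_at_load         # add our new pack
    #   # disk_nodes == set([(('b',), '5 6 7 8'), (('c',), '9 9 9 9')])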

    def _clear_obsolete_packs(self):
        """Delete everything from the obsolete-packs directory.
        """
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        for filename in obsolete_pack_transport.list_dir('.'):
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s" % (e,))

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = NewPack(self._upload_transport, self._index_transport,
            self._pack_transport, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode(),
            index_builder_class=self._index_builder_class,
            index_class=self._index_class)
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)

        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            self._new_pack.abort()
            self._remove_pack_indices(self._new_pack)
            self._new_pack = None
        self.repo._text_knit = None

    def _commit_write_group(self):
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            if not self.autopack():
                # when autopack takes no steps, the names list is still
                # unsaved.
                self._save_pack_names()
        else:
            self._new_pack.abort()
            self._new_pack = None
        self.repo._text_knit = None
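
# Illustrative sketch (editorial): the write-group lifecycle the three
# methods above implement, as driven by the public Repository API. The
# `repo` object is hypothetical.
#
#   repo.lock_write()
#   repo.start_write_group()      # _start_write_group: opens a NewPack
#   try:
#       # ... insert texts/inventories/revisions/signatures ...
#       repo.commit_write_group() # _commit_write_group: finish() + autopack()
#   except:
#       repo.abort_write_group()  # _abort_write_group: discards the NewPack
#       raise
#   finally:
#       repo.unlock()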


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph | HPSS | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    KnitVersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic meaning attached to the stored values.
    ===================================================
    """
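
    # Illustrative sketch (editorial, key values hypothetical): the tuple
    # addressing described above. A file text is a 2-tuple, a revision a
    # 1-tuple; only the last element reaches the physical knit layer.
    #
    #   repo.texts.get_record_stream(
    #       [('file-id', 'rev-id')], 'unordered', True)
    #   repo.revisions.get_record_stream([('rev-id',)], 'unordered', True)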

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        KnitRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        index_transport = self._transport.clone('indices')
        self._pack_collection = RepositoryPackCollection(self, self._transport,
            index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class)
        self.inventories = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.inventory_index.data_access,
            max_delta_chain=200)
        self.revisions = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                deltas=False, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.revision_index.data_access,
            max_delta_chain=0)
        self.signatures = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                deltas=False, parents=False, is_locked=self.is_locked),
            data_access=self._pack_collection.signature_index.data_access,
            max_delta_chain=0)
        self.texts = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False
        self._fetch_order = 'unordered'

    def _warn_if_deprecated(self):
        # This class isn't deprecated, but one sub-format is
        if isinstance(self._format, RepositoryFormatKnitPack5RichRootBroken):
            from bzrlib import repository
            if repository._deprecation_warning_done:
                return
            repository._deprecation_warning_done = True
            warning("Format %s for %s is deprecated - please use"
                    " 'bzr upgrade --1.6.1-rich-root'"
                    % (self._format, self.bzrdir.transport.base))

    def _abort_write_group(self):
        self._pack_collection._abort_write_group()

    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: a list of tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the location
            # in each index of the revision text so we can perform linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,
                            rev_parents))
        finally:
            pb.finished()
        return result
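
    # Editorial note on the batching above: with 2500 index entries,
    # batch_count = 2500 / 1000 + 1 = 3, giving slices [0:1000], [1000:2000]
    # and [2000:3000]; a final short or empty slice is harmless because the
    # loop breaks on an empty to_query.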

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if self._write_lock_count == 1 or (
            self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'r'):
            # forget what names there are
            self._pack_collection.reset()
            # XXX: Better to do an in-memory merge when acquiring a new lock -
            # factor out code from _save_pack_names.
            self._pack_collection.ensure_loaded()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        return self._pack_collection._commit_write_group()

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_locked(self):
        return self._write_lock_count or self.control_files.is_locked()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        if not self._write_lock_count and self.is_locked():
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            self._transaction = transactions.WriteTransaction()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
        self._refresh_data()

    def lock_read(self):
        if self._write_lock_count:
            self._write_lock_count += 1
        else:
            self.control_files.lock_read()
        for repo in self._fallback_repositories:
            # Writes don't affect fallback repos
            repo.lock_read()
        self._refresh_data()
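
    # Illustrative sketch (editorial): lock_write and lock_read share one
    # counter while write-locked, so nested read locks just bump it.
    #
    #   repo.lock_write()   # count 1: WriteTransaction created
    #   repo.lock_read()    # count 2: still write-locked
    #   repo.unlock()       # count 1
    #   repo.unlock()       # count 0: transaction.finish(), lock released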

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)

    def dont_leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.dont_leave_lock_in_place)

    @needs_write_lock
    def pack(self):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack()

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import PackReconciler
        reconciler = PackReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
            self._transaction = None
            self._write_lock_count = 0
            raise errors.BzrError(
                'Must end write group before releasing write lock on %s'
                % self)
        if self._write_lock_count:
            self._write_lock_count -= 1
            if not self._write_lock_count:
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
                for repo in self._fallback_repositories:
                    repo.unlock()
        else:
            self.control_files.unlock()
            for repo in self._fallback_repositories:
                repo.unlock()


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

    This repository format has:
     - a list of packs in pack-names
     - packs in packs/NAME.pack
     - indices in indices/NAME.{iix,six,tix,rix}
     - knit deltas in the packs, knit indices mapped to the indices.
     - thunk objects to support the knits programming API.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
     - a LockDir lock
    """

    # Set this attribute in derived classes to control the repository class
    # created by open and initialize.
    repository_class = None
    # Set this attribute in derived classes to control the
    # _commit_builder_class that the repository objects will have passed to
    # their constructor.
    _commit_builder_class = None
    # Set this attribute in derived classes to control the _serializer that the
    # repository objects will have passed to their constructor.
    _serializer = None
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False
    # What index classes to use
    index_builder_class = None
    index_class = None

    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = self.index_builder_class()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        return self.open(a_bzrdir=a_bzrdir, _found=True)

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
            repository at a slightly different url than normal.
            I.e. during 'upgrade'.
        """
        if not _found:
            format = RepositoryFormat.find_format(a_bzrdir)
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
                                'lock', lockdir.LockDir)
        return self.repository_class(_format=self,
                              a_bzrdir=a_bzrdir,
                              control_files=control_files,
                              _commit_builder_class=self._commit_builder_class,
                              _serializer=self._serializer)
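
# Illustrative sketch (editorial): the on-disk layout initialize() creates,
# with NAME standing in for a pack's name:
#
#   .bzr/repository/
#       format           <- get_format_string() contents
#       pack-names       <- index of live packs and their index sizes
#       packs/NAME.pack
#       indices/NAME.rix NAME.iix NAME.tix NAME.six
#       upload/          <- in-progress pack uploads
#       obsolete_packs/  <- retired packs awaiting deletion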


class RepositoryFormatKnitPack1(RepositoryFormatPack):
    """A no-subtrees parameterized Pack repository.

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    @property
    def _serializer(self):
        return xml5.serializer_v5
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('pack-0.92')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits without subtree support"

    def check_conversion_target(self, target_format):
        pass
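
# Editorial note: a repository is opened by matching the byte-for-byte
# contents of its 'format' file against get_format_string(), so the exact
# text (including the trailing newline) identifies the format on disk, e.g.:
#
#   RepositoryFormatKnitPack1().get_format_string() == \
#       "Bazaar pack repository format 1 (needs bzr 0.92)\n"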


class RepositoryFormatKnitPack3(RepositoryFormatPack):
    """A subtrees parameterized Pack repository.

    This repository format uses the xml7 serializer to get:
     - support for recording full info about the tree root
     - support for recording tree-references

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    @property
    def _serializer(self):
        return xml7.serializer_v7
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'pack-0.92-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with subtree support\n"


class RepositoryFormatKnitPack4(RepositoryFormatPack):
    """A rich-root, no subtrees parameterized Pack repository.

    This repository format uses the xml6 serializer to get:
     - support for recording full info about the tree root

    This format was introduced in 1.0.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False
    @property
    def _serializer(self):
        return xml6.serializer_v6
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'rich-root-pack')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar pack repository format 1 with rich root"
                " (needs bzr 1.0)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with rich root support\n"


class RepositoryFormatKnitPack5(RepositoryFormatPack):
    """Repository that supports external references to allow stacking.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('1.6')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs 5 (adds stacking support, requires bzr 1.6)"

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatKnitPack5RichRoot(RepositoryFormatPack):
    """A repository with rich roots and stacking.

    New in release 1.6.1.

    Supports stacking on other repositories, allowing data to be accessed
    without being stored locally.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n"

    def get_format_description(self):
        return "Packs 5 rich-root (adds stacking support, requires bzr 1.6.1)"


class RepositoryFormatKnitPack5RichRootBroken(RepositoryFormatPack):
    """A repository with rich roots and external references.

    New in release 1.6.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.

    This format was deprecated because the serializer it uses accidentally
    supported subtrees, when the format was not intended to. This meant that
    someone could accidentally fetch from an incorrect repository.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False # no subtrees

    supports_external_lookups = True
    # What index classes to use
    index_builder_class = InMemoryGraphIndex
    index_class = GraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            '1.6.1-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n"

    def get_format_description(self):
        return ("Packs 5 rich-root (adds stacking support, requires bzr 1.6)"
                " (deprecated)")


class RepositoryFormatPackDevelopment2(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    This is pack-1.6.1 with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development2')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 2 (needs bzr.dev from before 1.8)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1 with B+Trees.\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment2Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.7.

    This is 1.6.1-subtree (as it might have been) with B+Tree indices.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    supports_external_lookups = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex

    @property
    def _serializer(self):
        return xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development2-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 2 with subtree support "
            "(needs bzr.dev from before 1.8)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "1.6.1-subtree with B+Tree indices.\n")