# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from itertools import izip
import md5
import time

from bzrlib import (
    debug,
    graph,
    pack,
    ui,
    )
from bzrlib.index import (
    CombinedGraphIndex,
    GraphIndex,
    GraphIndexBuilder,
    GraphIndexPrefixAdapter,
    InMemoryGraphIndex,
    )
from bzrlib.knit import (
    KnitPlainFactory,
    KnitVersionedFiles,
    _DirectPackAccess,
    _KnitGraphIndex,
    )
from bzrlib.osutils import rand_chars, split_lines
from bzrlib.pack import ContainerWriter
from bzrlib.store import revision
from bzrlib import tsort
""")

from bzrlib import errors
from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.repofmt.knitrepo import KnitRepository
from bzrlib.repository import (
    CommitBuilder,
    MetaDirRepositoryFormat,
    RootCommitBuilder,
    )
import bzrlib.revision as _mod_revision
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.trace import (
    mutter,
    warning,
    )
class PackCommitBuilder(CommitBuilder):
    """A subclass of CommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])


class PackRootCommitBuilder(RootCommitBuilder):
    """A subclass of RootCommitBuilder to add texts with pack semantics.

    Specifically this uses one knit object rather than one knit object per
    added text, reducing memory and object pressure.
    """

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        CommitBuilder.__init__(self, repository, parents, config,
            timestamp=timestamp, timezone=timezone, committer=committer,
            revprops=revprops, revision_id=revision_id)
        self._file_graph = graph.Graph(
            repository._pack_collection.text_index.combined_index)

    def _heads(self, file_id, revision_ids):
        keys = [(file_id, revision_id) for revision_id in revision_ids]
        return set([key[1] for key in self._file_graph.heads(keys)])
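
# Illustrative note (not in the original module): _heads projects per-file
# graph keys back to revision ids. Assuming a hypothetical file graph where
# ('f-id', 'rev-b') and ('f-id', 'rev-c') both descend from
# ('f-id', 'rev-a'), a sketch of the expected behaviour:
#
#   builder._heads('f-id', ['rev-a', 'rev-b', 'rev-c'])
#   # heads() drops the dominated key ('f-id', 'rev-a'), so this
#   # returns set(['rev-b', 'rev-c'])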

class Pack(object):
    """An in memory proxy for a pack and its indices.

    This is a base class that is not directly used, instead the classes
    ExistingPack and NewPack are used.
    """

    def __init__(self, revision_index, inventory_index, text_index,
        signature_index):
        """Create a pack instance.

        :param revision_index: A GraphIndex for determining what revisions are
            present in the Pack and accessing the locations of their texts.
        :param inventory_index: A GraphIndex for determining what inventories are
            present in the Pack and accessing the locations of their
            texts/deltas.
        :param text_index: A GraphIndex for determining what file texts
            are present in the pack and accessing the locations of their
            texts/deltas (via (fileid, revisionid) tuples).
        :param signature_index: A GraphIndex for determining what signatures are
            present in the Pack and accessing the locations of their texts.
        """
        self.revision_index = revision_index
        self.inventory_index = inventory_index
        self.text_index = text_index
        self.signature_index = signature_index

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        return self.pack_transport, self.file_name()

    def file_name(self):
        """Get the file name for the pack on disk."""
        return self.name + '.pack'

    def get_revision_count(self):
        return self.revision_index.key_count()

    def inventory_index_name(self, name):
        """The inv index is the name + .iix."""
        return self.index_name('inventory', name)

    def revision_index_name(self, name):
        """The revision index is the name + .rix."""
        return self.index_name('revision', name)

    def signature_index_name(self, name):
        """The signature index is the name + .six."""
        return self.index_name('signature', name)

    def text_index_name(self, name):
        """The text index is the name + .tix."""
        return self.index_name('text', name)

    def _external_compression_parents_of_texts(self):
        keys = set()
        refs = set()
        for node in self.text_index.iter_all_entries():
            keys.add(node[1])
            refs.update(node[3][1])
        return refs - keys
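
# Illustrative note (not in the original module): GraphIndex entries are
# tuples of (index, key, value, reference_lists). For the text index,
# node[1] is a (file_id, revision_id) key and node[3][1] is the second
# reference list - the compression parents. A hypothetical sketch:
#
#   node = (idx, ('f-id', 'rev-2'), '12345 678',
#           ((('f-id', 'rev-1'),), (('f-id', 'rev-1'),)))
#   node[3][1]  # -> compression parents: (('f-id', 'rev-1'),)
#
# so _external_compression_parents_of_texts returns compression parents
# that are not themselves stored in this pack.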

class ExistingPack(Pack):
    """An in memory proxy for an existing .pack and its disk indices."""

    def __init__(self, pack_transport, name, revision_index, inventory_index,
        text_index, signature_index):
        """Create an ExistingPack object.

        :param pack_transport: The transport where the pack file resides.
        :param name: The name of the pack on disk in the pack_transport.
        """
        Pack.__init__(self, revision_index, inventory_index, text_index,
            signature_index)
        self.name = name
        self.pack_transport = pack_transport
        if None in (revision_index, inventory_index, text_index,
                signature_index, name, pack_transport):
            raise AssertionError()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<bzrlib.repofmt.pack_repo.Pack object at 0x%x, %s, %s>" % (
            id(self), self.pack_transport, self.name)


class NewPack(Pack):
    """An in memory proxy for a pack which is being created."""

    # A map of index 'type' to the file extension and position in the
    # index_sizes array.
    index_definitions = {
        'revision': ('.rix', 0),
        'inventory': ('.iix', 1),
        'text': ('.tix', 2),
        'signature': ('.six', 3),
        }
    def __init__(self, upload_transport, index_transport, pack_transport,
        upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param upload_transport: A writable transport for the pack to be
            incrementally uploaded to.
        :param index_transport: A writable transport for the pack's indices to
            be written to when the pack is finished.
        :param pack_transport: A writable transport for the pack to be renamed
            to when the upload is complete. This *must* be the same as
            upload_transport.clone('../packs').
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        """
        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            InMemoryGraphIndex(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source parents for texts.
            InMemoryGraphIndex(reference_lists=2),
            # Texts: compression and per file graph, for all fileids - so two
            # reference lists and two elements in the key tuple.
            InMemoryGraphIndex(reference_lists=2, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # listing.
            InMemoryGraphIndex(reference_lists=0),
            )
        # where should the new pack be opened
        self.upload_transport = upload_transport
        # where are indices written out to
        self.index_transport = index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = md5.new()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them.  Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # when the buffer grows past the cache limit (or on an explicit
            # flush), push it to the stream and fold it into the digest.
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'

    def abort(self):
        """Cancel creating this pack."""
        self._state = 'aborted'
        self.write_stream.close()
        # Remove the temporary pack file.
        self.upload_transport.delete(self.random_name)
        # The indices have no state on disk.

    def access_tuple(self):
        """Return a tuple (transport, name) for the pack content."""
        if self._state == 'finished':
            return Pack.access_tuple(self)
        elif self._state == 'open':
            return self.upload_transport, self.random_name
        else:
            raise AssertionError(self._state)

    def data_inserted(self):
        """True if data has been added to this pack."""
        return bool(self.get_revision_count() or
            self.inventory_index.key_count() or
            self.text_index.key_count() or
            self.signature_index.key_count())

    def finish(self):
        """Finish the new pack.

        This:
        - finalises the content
        - assigns a name (the md5 of the content, currently)
        - writes out the associated indices
        - renames the pack into place.
        - stores the index size tuple for the pack in the index_sizes
          attribute.
        """
        self._writer.end()
        self._write_data('', flush=True)
        self.name = self._hash.hexdigest()
        # write indices
        # XXX: It'd be better to write them all to temporary names, then
        # rename them all into place, so that the window when only some are
        # visible is smaller.  On the other hand none will be seen until
        # they're in the names list.
        self.index_sizes = [None, None, None, None]
        self._write_index('revision', self.revision_index, 'revision')
        self._write_index('inventory', self.inventory_index, 'inventory')
        self._write_index('text', self.text_index, 'file texts')
        self._write_index('signature', self.signature_index,
            'revision signatures')
        self.write_stream.close()
        # Note that this will clobber an existing pack with the same name,
        # without checking for hash collisions. While this is undesirable this
        # is something that can be rectified in a subsequent release. One way
        # to rectify it may be to leave the pack at the original name, writing
        # its pack-names entry as something like 'HASH: index-sizes
        # temporary-name'. Allocate that and check for collisions, if it is
        # collision free then rename it into place. If clients know this scheme
        # they can handle missing-file errors by:
        #  - try for HASH.pack
        #  - try for temporary-name
        #  - refresh the pack-list to see if the pack is now absent
        self.upload_transport.rename(self.random_name,
                '../packs/' + self.name + '.pack')
        self._state = 'finished'
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: pack renamed into place: %s%s->%s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                self.pack_transport, self.name,
                time.time() - self.start_time)
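
    # Illustrative note (not in the original module): after finish(), a pack
    # started under a rand_chars(20) temporary name in the upload directory
    # ends up content-addressed by its md5, with one file per index type
    # alongside it (the 'd41d8cd9...' name here is hypothetical):
    #
    #   packs/d41d8cd9....pack      # the pack container itself
    #   indices/d41d8cd9....rix     # revision index
    #   indices/d41d8cd9....iix     # inventory index
    #   indices/d41d8cd9....tix     # file-text index
    #   indices/d41d8cd9....six     # signature index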

    def flush(self):
        """Flush any current data."""
        if self._buffer[1]:
            bytes = ''.join(self._buffer[0])
            self.write_stream.write(bytes)
            self._hash.update(bytes)
            self._buffer[:] = [[], 0]

    def index_name(self, index_type, name):
        """Get the disk name of an index type for pack name 'name'."""
        return name + NewPack.index_definitions[index_type][0]

    def index_offset(self, index_type):
        """Get the position in a index_size array for a given index type."""
        return NewPack.index_definitions[index_type][1]
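
    # Illustrative note (not in the original module): index_name and
    # index_offset simply consult index_definitions. For a hypothetical
    # pack named 'd41d8cd9':
    #
    #   new_pack.index_name('revision', 'd41d8cd9')  # -> 'd41d8cd9.rix'
    #   new_pack.index_offset('text')                # -> 2, its slot in
    #                                                #    index_sizes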

    def _replace_index_with_readonly(self, index_type):
        setattr(self, index_type + '_index',
            GraphIndex(self.index_transport,
                self.index_name(index_type, self.name),
                self.index_sizes[self.index_offset(index_type)]))

    def set_write_cache_size(self, size):
        self._cache_limit = size

    def _write_index(self, index_type, index, label):
        """Write out an index.

        :param index_type: The type of index to write - e.g. 'revision'.
        :param index: The index object to serialise.
        :param label: What label to give the index e.g. 'revision'.
        """
        index_name = self.index_name(index_type, self.name)
        self.index_sizes[self.index_offset(index_type)] = \
            self.index_transport.put_file(index_name, index.finish(),
            mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            # XXX: size might be interesting?
            mutter('%s: create_pack: wrote %s index: %s%s t+%6.3fs',
                time.ctime(), label, self.upload_transport.base,
                self.random_name, time.time() - self.start_time)
        # Replace the writable index on this object with a readonly,
        # presently unloaded index. We should alter
        # the index layer to make its finish() error if add_node is
        # subsequently used. RBC
        self._replace_index_with_readonly(index_type)


class AggregateIndex(object):
    """An aggregated index for the RepositoryPackCollection.

    AggregateIndex is responsible for managing the PackAccess object,
    Index-To-Pack mapping, and all indices list for a specific type of index
    such as 'revision index'.

    A CombinedIndex provides an index on a single key space built up
    from several on-disk indices.  The AggregateIndex builds on this
    to provide a knit access layer, and allows having up to one writable
    index within the collection.
    """
    # XXX: Probably 'can be written to' could/should be separated from 'acts
    # like a knit index' -- mbp 20071024

    def __init__(self):
        """Create an AggregateIndex."""
        self.index_to_pack = {}
        self.combined_index = CombinedGraphIndex([])
        self.data_access = _DirectPackAccess(self.index_to_pack)
        self.add_callback = None

    def replace_indices(self, index_to_pack, indices):
        """Replace the current mappings with fresh ones.

        This should probably not be used eventually, rather incremental add and
        removal of indices. It has been added during refactoring of existing
        code.

        :param index_to_pack: A mapping from index objects to
            (transport, name) tuples for the pack file data.
        :param indices: A list of indices.
        """
        # refresh the revision pack map dict without replacing the instance.
        self.index_to_pack.clear()
        self.index_to_pack.update(index_to_pack)
        # XXX: API break - clearly a 'replace' method would be good?
        self.combined_index._indices[:] = indices
        # the current add nodes callback for the current writable index if
        # there is one.
        self.add_callback = None

    def add_index(self, index, pack):
        """Add index to the aggregate, which is an index for Pack pack.

        Future searches on the aggregate index will search this new index
        before all previously inserted indices.

        :param index: An Index for the pack.
        :param pack: A Pack instance.
        """
        # expose it to the index map
        self.index_to_pack[index] = pack.access_tuple()
        # put it at the front of the linear index list
        self.combined_index.insert_index(0, index)

    def add_writable_index(self, index, pack):
        """Add an index which is able to have data added to it.

        There can be at most one writable index at any time.  Any
        modifications made to the knit are put into this index.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        if self.add_callback is not None:
            raise AssertionError(
                "%s already has a writable index through %s" % \
                (self, self.add_callback))
        # allow writing: queue writes to a new index
        self.add_index(index, pack)
        # Updates the index to packs mapping as a side effect,
        self.data_access.set_writer(pack._writer, index, pack.access_tuple())
        self.add_callback = index.add_nodes

    def clear(self):
        """Reset all the aggregate data to nothing."""
        self.data_access.set_writer(None, None, (None, None))
        self.index_to_pack.clear()
        del self.combined_index._indices[:]
        self.add_callback = None

    def remove_index(self, index, pack):
        """Remove index from the indices used to answer queries.

        :param index: An index from the pack parameter.
        :param pack: A Pack instance.
        """
        del self.index_to_pack[index]
        self.combined_index._indices.remove(index)
        if (self.add_callback is not None and
            getattr(index, 'add_nodes', None) == self.add_callback):
            self.add_callback = None
            self.data_access.set_writer(None, None, (None, None))


class Packer(object):
    """Create a pack from packs."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None):
        """Create a Packer.

        :param pack_collection: A RepositoryPackCollection object where the
            new pack is being written to.
        :param packs: The packs to combine.
        :param suffix: The suffix to use on the temporary files for the pack.
        :param revision_ids: Revision ids to limit the pack to.
        """
        self.packs = packs
        self.suffix = suffix
        self.revision_ids = revision_ids
        # The pack object we are creating.
        self.new_pack = None
        self._pack_collection = pack_collection
        # The index layer keys for the revisions being copied. None for 'all
        # revisions'.
        self._revision_keys = None
        # What text keys to copy. None for 'all texts'. This is set by
        # _copy_inventory_texts
        self._text_filter = None
        self._extra_init()

    def _extra_init(self):
        """A template hook to allow extending the constructor trivially."""

    def pack(self, pb=None):
        """Create a new pack by reading data from other packs.

        This does little more than a bulk copy of data. One key difference
        is that data with the same item key across multiple packs is elided
        from the output. The new pack is written into the current pack store
        along with its indices, and the name added to the pack names. The
        source packs are not altered and are not required to be in the current
        pack collection.

        :param pb: An optional progress bar to use. A nested bar is created if
            this is None.
        :return: A Pack object, or None if nothing was copied.
        """
        # open a pack - using the same name as the last temporary file
        # - which has already been flushed, so it's safe.
        # XXX: - duplicate code warning with start_write_group; fix before
        #      considering 'done'.
        if self._pack_collection._new_pack is not None:
            raise errors.BzrError('call to create_pack_from_packs while '
                'another pack is being written.')
        if self.revision_ids is not None:
            if len(self.revision_ids) == 0:
                # silly fetch request.
                return None
            else:
                self.revision_ids = frozenset(self.revision_ids)
                self.revision_keys = frozenset((revid,) for revid in
                    self.revision_ids)
        if pb is None:
            self.pb = ui.ui_factory.nested_progress_bar()
        else:
            self.pb = pb
        try:
            return self._create_pack_from_packs()
        finally:
            if pb is None:
                self.pb.finished()

    def open_pack(self):
        """Open a pack for the pack we are creating."""
        return NewPack(self._pack_collection._upload_transport,
            self._pack_collection._index_transport,
            self._pack_collection._pack_transport, upload_suffix=self.suffix,
            file_mode=self._pack_collection.repo.bzrdir._get_file_mode())

    def _copy_revision_texts(self):
        """Copy revision data to the new pack."""
        # select revisions
        if self.revision_ids:
            revision_keys = [(revision_id,) for revision_id in self.revision_ids]
        else:
            revision_keys = None
        # select revision keys
        revision_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'revision_index')[0]
        revision_nodes = self._pack_collection._index_contents(revision_index_map, revision_keys)
        # copy revision keys and adjust values
        self.pb.update("Copying revision texts", 1)
        total_items, readv_group_iter = self._revision_node_readv(revision_nodes)
        list(self._copy_nodes_graph(revision_index_map, self.new_pack._writer,
            self.new_pack.revision_index, readv_group_iter, total_items))
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revisions copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.revision_index.key_count(),
                time.time() - self.new_pack.start_time)
        self._revision_keys = revision_keys

    def _copy_inventory_texts(self):
        """Copy the inventory texts to the new pack.

        self._revision_keys is used to determine what inventories to copy.

        Sets self._text_filter appropriately.
        """
        # select inventory keys
        inv_keys = self._revision_keys # currently the same keyspace, and note that
        # querying for keys here could introduce a bug where an inventory item
        # is missed, so do not change it to query separately without cross
        # checking like the text key check below.
        inventory_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'inventory_index')[0]
        inv_nodes = self._pack_collection._index_contents(inventory_index_map, inv_keys)
        # copy inventory keys and adjust values
        # XXX: Should be a helper function to allow different inv representation
        # at this point.
        self.pb.update("Copying inventory texts", 2)
        total_items, readv_group_iter = self._least_readv_node_readv(inv_nodes)
        # Only grab the output lines if we will be processing them
        output_lines = bool(self.revision_ids)
        inv_lines = self._copy_nodes_graph(inventory_index_map,
            self.new_pack._writer, self.new_pack.inventory_index,
            readv_group_iter, total_items, output_lines=output_lines)
        if self.revision_ids:
            self._process_inventory_lines(inv_lines)
        else:
            # eat the iterator to cause it to execute.
            list(inv_lines)
            self._text_filter = None
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: inventories copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.inventory_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _copy_text_texts(self):
        # select text keys
        text_index_map, text_nodes = self._get_text_nodes()
        if self._text_filter is not None:
            # We could return the keys copied as part of the return value from
            # _copy_nodes_graph but this doesn't work all that well with the
            # need to get line output too, so we check separately, and as we're
            # going to buffer everything anyway, we check beforehand, which
            # saves reading knit data over the wire when we know there are
            # missing records.
            text_nodes = set(text_nodes)
            present_text_keys = set(_node[1] for _node in text_nodes)
            missing_text_keys = set(self._text_filter) - present_text_keys
            if missing_text_keys:
                # TODO: raise a specific error that can handle many missing
                # keys.
                a_missing_key = missing_text_keys.pop()
                raise errors.RevisionNotPresent(a_missing_key[1],
                    a_missing_key[0])
        # copy text keys and adjust values
        self.pb.update("Copying content texts", 3)
        total_items, readv_group_iter = self._least_readv_node_readv(text_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        self._log_copied_texts()

    def _check_references(self):
        """Make sure our external references are present."""
        external_refs = self.new_pack._external_compression_parents_of_texts()
        if external_refs:
            index = self._pack_collection.text_index.combined_index
            found_items = list(index.iter_entries(external_refs))
            if len(found_items) != len(external_refs):
                found_keys = set(k for idx, k, refs, value in found_items)
                missing_items = external_refs - found_keys
                missing_file_id, missing_revision_id = missing_items.pop()
                raise errors.RevisionNotPresent(missing_revision_id,
                    missing_file_id)

    def _create_pack_from_packs(self):
        self.pb.update("Opening pack", 0, 5)
        self.new_pack = self.open_pack()
        new_pack = self.new_pack
        # buffer data - we won't be reading-back during the pack creation and
        # this makes a significant difference on sftp pushes.
        new_pack.set_write_cache_size(1024*1024)
        if 'pack' in debug.debug_flags:
            plain_pack_list = ['%s%s' % (a_pack.pack_transport.base, a_pack.name)
                for a_pack in self.packs]
            if self.revision_ids is not None:
                rev_count = len(self.revision_ids)
            else:
                rev_count = 'all'
            mutter('%s: create_pack: creating pack from source packs: '
                '%s%s %s revisions wanted %s t=0',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                plain_pack_list, rev_count)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_text_texts()
        # select signature keys
        signature_filter = self._revision_keys # same keyspace
        signature_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'signature_index')[0]
        signature_nodes = self._pack_collection._index_contents(signature_index_map,
            signature_filter)
        # copy signature keys and adjust values
        self.pb.update("Copying signature texts", 4)
        self._copy_nodes(signature_nodes, signature_index_map, new_pack._writer,
            new_pack.signature_index)
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: revision signatures copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base, new_pack.random_name,
                new_pack.signature_index.key_count(),
                time.time() - new_pack.start_time)
        self._check_references()
        if not self._use_pack(new_pack):
            new_pack.abort()
            return None
        self.pb.update("Finishing pack", 5)
        new_pack.finish()
        self._pack_collection.allocate(new_pack)
        return new_pack
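
    # Illustrative note (not in the original module): _create_pack_from_packs
    # drives a five-step progress bar, mirroring the pb.update calls above:
    #
    #   0. "Opening pack"            - open_pack()
    #   1. "Copying revision texts"  - _copy_revision_texts()
    #   2. "Copying inventory texts" - _copy_inventory_texts()
    #   3. "Copying content texts"   - _copy_text_texts()
    #   4. "Copying signature texts" - signature copy via _copy_nodes()
    #   5. "Finishing pack"          - _check_references(), finish(), allocate()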

    def _copy_nodes(self, nodes, index_map, writer, write_index):
        """Copy knit nodes between packs with no graph references."""
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_copy_nodes(nodes, index_map, writer,
                write_index, pb)
        finally:
            pb.finished()

    def _do_copy_nodes(self, nodes, index_map, writer, write_index, pb):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # plan a readv on each source pack:
        # group by pack
        nodes = sorted(nodes)
        # how to map this into knit.py - or knit.py into this?
        # we don't want the typical knit logic, we want grouping by pack
        # at this point - perhaps a helper library for the following code
        # duplication points?
        request_groups = {}
        for index, key, value in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value))
        record_index = 0
        pb.update("Copied record", record_index, len(nodes))
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append((offset, length, (key, value[0])))
            # linear scan up the pack
            pack_readv_requests.sort()
            # copy the data
            transport, path = index_map[index]
            reader = pack.make_readv_reader(transport, path,
                [offset[0:2] for offset in pack_readv_requests])
            for (names, read_func), (_1, _2, (key, eol_flag)) in \
                izip(reader.iter_records(), pack_readv_requests):
                raw_data = read_func(None)
                # check the header only
                df, _ = knit._parse_record_header(key, raw_data)
                df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size))
                pb.update("Copied record", record_index)
                record_index += 1

    def _copy_nodes_graph(self, index_map, writer, write_index,
        readv_group_iter, total_items, output_lines=False):
        """Copy knit nodes between packs.

        :param output_lines: Return lines present in the copied data as
            an iterator of line,version_id.
        """
        pb = ui.ui_factory.nested_progress_bar()
        try:
            for result in self._do_copy_nodes_graph(index_map, writer,
                write_index, output_lines, pb, readv_group_iter, total_items):
                yield result
        except Exception:
            # Python 2.4 does not permit try:finally: in a generator.
            pb.finished()
            raise
        else:
            pb.finished()

    def _do_copy_nodes_graph(self, index_map, writer, write_index,
        output_lines, pb, readv_group_iter, total_items):
        # for record verification
        knit = KnitVersionedFiles(None, None)
        # for line extraction when requested (inventories only)
        if output_lines:
            factory = KnitPlainFactory()
        record_index = 0
        pb.update("Copied record", record_index, total_items)
        for index, readv_vector, node_vector in readv_group_iter:
            # copy the data
            transport, path = index_map[index]
            reader = pack.make_readv_reader(transport, path, readv_vector)
            for (names, read_func), (key, eol_flag, references) in \
                izip(reader.iter_records(), node_vector):
                raw_data = read_func(None)
                if output_lines:
                    # read the entire thing
                    content, _ = knit._parse_record(key[-1], raw_data)
                    if len(references[-1]) == 0:
                        line_iterator = factory.get_fulltext_content(content)
                    else:
                        line_iterator = factory.get_linedelta_content(content)
                    for line in line_iterator:
                        yield line, key
                else:
                    # check the header only
                    df, _ = knit._parse_record_header(key, raw_data)
                    df.close()
                pos, size = writer.add_bytes_record(raw_data, names)
                write_index.add_node(key, eol_flag + "%d %d" % (pos, size), references)
                pb.update("Copied record", record_index)
                record_index += 1

    def _get_text_nodes(self):
        text_index_map = self._pack_collection._packs_list_to_pack_map_and_index_list(
            self.packs, 'text_index')[0]
        return text_index_map, self._pack_collection._index_contents(text_index_map,
            self._text_filter)

    def _least_readv_node_readv(self, nodes):
        """Generate request groups for nodes using the least readv's.

        :param nodes: An iterable of graph index nodes.
        :return: Total node count and an iterator of the data needed to perform
            readvs to obtain the data for nodes. Each item yielded by the
            iterator is a tuple with:
            index, readv_vector, node_vector. readv_vector is a list ready to
            hand to the transport readv method, and node_vector is a list of
            (key, eol_flag, references) for the node retrieved by the
            matching readv_vector.
        """
        # group by pack so we do one readv per pack
        nodes = sorted(nodes)
        total = len(nodes)
        request_groups = {}
        for index, key, value, references in nodes:
            if index not in request_groups:
                request_groups[index] = []
            request_groups[index].append((key, value, references))
        result = []
        for index, items in request_groups.iteritems():
            pack_readv_requests = []
            for key, value, references in items:
                # ---- KnitGraphIndex.get_position
                bits = value[1:].split(' ')
                offset, length = int(bits[0]), int(bits[1])
                pack_readv_requests.append(
                    ((offset, length), (key, value[0], references)))
            # linear scan up the pack to maximum range combining.
            pack_readv_requests.sort()
            # split out the readv and the node data.
            pack_readv = [readv for readv, node in pack_readv_requests]
            node_vector = [node for readv, node in pack_readv_requests]
            result.append((index, pack_readv, node_vector))
        return total, result
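
    # Illustrative note (not in the original module): given nodes spread
    # over two packs, the grouping above yields one readv request list per
    # pack. A hypothetical sketch:
    #
    #   nodes from index A: ('rev-1', ' 0 100', ...), ('rev-2', ' 100 80', ...)
    #   nodes from index B: ('rev-3', ' 0 60', ...)
    #
    #   -> total = 3, and the result contains
    #      (A, [(0, 100), (100, 80)], [('rev-1', ...), ('rev-2', ...)])
    #      (B, [(0, 60)],             [('rev-3', ...)])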

    def _log_copied_texts(self):
        if 'pack' in debug.debug_flags:
            mutter('%s: create_pack: file texts copied: %s%s %d items t+%6.3fs',
                time.ctime(), self._pack_collection._upload_transport.base,
                self.new_pack.random_name,
                self.new_pack.text_index.key_count(),
                time.time() - self.new_pack.start_time)

    def _process_inventory_lines(self, inv_lines):
        """Use up the inv_lines generator and setup a text key filter."""
        repo = self._pack_collection.repo
        fileid_revisions = repo._find_file_ids_from_xml_inventory_lines(
            inv_lines, self.revision_keys)
        text_filter = []
        for fileid, file_revids in fileid_revisions.iteritems():
            text_filter.extend([(fileid, file_revid) for file_revid in file_revids])
        self._text_filter = text_filter

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        return self._least_readv_node_readv(revision_nodes)

    def _use_pack(self, new_pack):
        """Return True if new_pack should be used.

        :param new_pack: The pack that has just been created.
        :return: True if the pack should be used.
        """
        return new_pack.data_inserted()


class OptimisingPacker(Packer):
    """A packer which spends more time to create better disk layouts."""

    def _revision_node_readv(self, revision_nodes):
        """Return the total revisions and the readv's to issue.

        This sort places revisions in topological order with the ancestors
        after the children.

        :param revision_nodes: The revision index contents for the packs being
            incorporated into the new pack.
        :return: As per _least_readv_node_readv.
        """
        # build an ancestors dict
        ancestors = {}
        by_key = {}
        for index, key, value, references in revision_nodes:
            ancestors[key] = references[0]
            by_key[key] = (index, value, references)
        order = tsort.topo_sort(ancestors)
        total = len(order)
        # Single IO is pathological, but it will work as a starting point.
        requests = []
        for key in reversed(order):
            index, value, references = by_key[key]
            # ---- KnitGraphIndex.get_position
            bits = value[1:].split(' ')
            offset, length = int(bits[0]), int(bits[1])
            requests.append(
                (index, [(offset, length)], [(key, value[0], references)]))
        # TODO: combine requests in the same index that are in ascending order.
        return total, requests
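
    # Illustrative note (not in the original module): topo_sort emits
    # parents before children, so iterating reversed(order) writes each
    # revision before its ancestors - newest data first in the pack. One
    # plausible motivation, implied by the class docstring, is that readers
    # fetching recent history then touch the front of the file.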


class ReconcilePacker(Packer):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    regenerated.
    """

    def _extra_init(self):
        self._data_changed = False

    def _process_inventory_lines(self, inv_lines):
        """Generate a text key reference map for reconciling with."""
        repo = self._pack_collection.repo
        refs = repo._find_text_key_references_from_xml_inventory_lines(
            inv_lines)
        self._text_refs = refs
        # during reconcile we:
        #  - convert unreferenced texts to full texts
        #  - correct texts which reference a text not copied to be full texts
        #  - copy all others as-is but with corrected parents.
        #  - so at this point we don't know enough to decide what becomes a full
        #    text.
        self._text_filter = None

    def _copy_text_texts(self):
        """Generate what texts we should have and then copy."""
        self.pb.update("Copying content texts", 3)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        ancestors = dict([(key[0], tuple(ref[0] for ref in refs[0])) for
            _1, key, _2, refs in
            self.new_pack.revision_index.iter_all_entries()])
        ideal_index = repo._generate_text_key_index(self._text_refs, ancestors)
        # 2) generate a text_nodes list that contains all the deltas that can
        #    be used as-is, with corrected parents.
        ok_nodes = []
        bad_texts = []
        discarded_nodes = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        text_index_map, text_nodes = self._get_text_nodes()
        for node in text_nodes:
            # 0 - index
            # 1 - key
            # 2 - value
            # 3 - refs
            try:
                ideal_parents = tuple(ideal_index[node[1]])
            except KeyError:
                discarded_nodes.append(node)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                if ideal_parents == node[3][0]:
                    # no change needed.
                    ok_nodes.append(node)
                elif ideal_parents[0:1] == node[3][0][0:1]:
                    # the left most parent is the same, or there are no parents
                    # today. Either way, we can preserve the representation as
                    # long as we change the refs to be inserted.
                    self._data_changed = True
                    ok_nodes.append((node[0], node[1], node[2],
                        (ideal_parents, node[3][1])))
                    self._data_changed = True
                else:
                    # Reinsert this text completely
                    bad_texts.append((node[1], ideal_parents))
                    self._data_changed = True
        # we're finished with some data.
        del ideal_index
        del text_nodes
        # 3) bulk copy the ok data
        total_items, readv_group_iter = self._least_readv_node_readv(ok_nodes)
        list(self._copy_nodes_graph(text_index_map, self.new_pack._writer,
            self.new_pack.text_index, readv_group_iter, total_items))
        # 4) adhoc copy all the other texts.
        # We have to topologically insert all texts otherwise we can fail to
        # reconcile when parts of a single delta chain are preserved intact,
        # and other parts are not. E.g. Discarded->d1->d2->d3. d1 will be
        # reinserted, and if d3 has incorrect parents it will also be
        # reinserted. If we insert d3 first, d2 is present (as it was bulk
        # copied), so we will try to delta, but d2 is not currently able to be
        # extracted because its basis d1 is not present. Topologically sorting
        # addresses this. The following generates a sort for all the texts that
        # are being inserted without having to reference the entire text key
        # space (we only topo sort the revisions, which is smaller).
        topo_order = tsort.topo_sort(ancestors)
        rev_order = dict(zip(topo_order, range(len(topo_order))))
        bad_texts.sort(key=lambda key:rev_order[key[0][1]])
        transaction = repo.get_transaction()
        file_id_index = GraphIndexPrefixAdapter(
            self.new_pack.text_index,
            ('blank', ), 1,
            add_nodes_callback=self.new_pack.text_index.add_nodes)
        data_access = _DirectPackAccess(
            {self.new_pack.text_index:self.new_pack.access_tuple()})
        data_access.set_writer(self.new_pack._writer, self.new_pack.text_index,
            self.new_pack.access_tuple())
        output_texts = KnitVersionedFiles(
            _KnitGraphIndex(self.new_pack.text_index,
                add_callback=self.new_pack.text_index.add_nodes,
                deltas=True, parents=True, is_locked=repo.is_locked),
            data_access=data_access, max_delta_chain=200)
        for key, parent_keys in bad_texts:
            # We refer to the new pack to delta data being output.
            # A possible improvement would be to catch errors on short reads
            # and only flush then.
            self.new_pack.flush()
            parents = []
            for parent_key in parent_keys:
                if parent_key[0] != key[0]:
                    # Graph parents must match the fileid
                    raise errors.BzrError('Mismatched key parent %r:%r' %
                        (key, parent_keys))
                parents.append(parent_key[1])
            text_lines = split_lines(repo.texts.get_record_stream(
                [key], 'unordered', True).next().get_bytes_as('fulltext'))
            output_texts.add_lines(key, parent_keys, text_lines,
                random_id=True, check_content=False)
        # 5) check that nothing inserted has a reference outside the keyspace.
        missing_text_keys = self.new_pack._external_compression_parents_of_texts()
        if missing_text_keys:
            raise errors.BzrError('Reference to missing compression parents %r'
                % (missing_text_keys,))
        self._log_copied_texts()

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        # XXX: we might be better checking this at the copy time.
        original_inventory_keys = set()
        inv_index = self._pack_collection.inventory_index.combined_index
        for entry in inv_index.iter_all_entries():
            original_inventory_keys.add(entry[1])
        new_inventory_keys = set()
        for entry in new_pack.inventory_index.iter_all_entries():
            new_inventory_keys.add(entry[1])
        if new_inventory_keys != original_inventory_keys:
            self._data_changed = True
        return new_pack.data_inserted() and self._data_changed


class RepositoryPackCollection(object):
    """Management of packs within a repository."""

    def __init__(self, repo, transport, index_transport, upload_transport,
                 pack_transport):
        """Create a new RepositoryPackCollection.

        :param transport: Addresses the repository base directory
            (typically .bzr/repository/).
        :param index_transport: Addresses the directory containing indices.
        :param upload_transport: Addresses the directory into which packs are written
            while they're being created.
        :param pack_transport: Addresses the directory of existing complete packs.
        """
        self.repo = repo
        self.transport = transport
        self._index_transport = index_transport
        self._upload_transport = upload_transport
        self._pack_transport = pack_transport
        self._suffix_offsets = {'.rix': 0, '.iix': 1, '.tix': 2, '.six': 3}
        self.packs = []
        # name:Pack mapping
        self._packs_by_name = {}
        # the previous pack-names content
        self._packs_at_load = None
        # when a pack is being created by this object, the state of that pack.
        self._new_pack = None
        # aggregated revision index data
        self.revision_index = AggregateIndex()
        self.inventory_index = AggregateIndex()
        self.text_index = AggregateIndex()
        self.signature_index = AggregateIndex()

    def add_pack_to_memory(self, pack):
        """Make a Pack object available to the repository to satisfy queries.

        :param pack: A Pack object.
        """
        if pack.name in self._packs_by_name:
            raise AssertionError()
        self.packs.append(pack)
        self._packs_by_name[pack.name] = pack
        self.revision_index.add_index(pack.revision_index, pack)
        self.inventory_index.add_index(pack.inventory_index, pack)
        self.text_index.add_index(pack.text_index, pack)
        self.signature_index.add_index(pack.signature_index, pack)

    def all_packs(self):
        """Return a list of all the Pack objects this repository has.

        Note that an in-progress pack being created is not returned.

        :return: A list of Pack objects for all the packs in the repository.
        """
        result = []
        for name in self.names():
            result.append(self.get_pack_by_name(name))
        return result

    def autopack(self):
        """Pack the pack collection incrementally.

        This will not attempt global reorganisation or recompression,
        rather it will just ensure that the total number of packs does
        not grow without bound. It uses the _max_pack_count method to
        determine if autopacking is needed, and the pack_distribution
        method to determine the number of revisions in each pack.

        If autopacking takes place then the packs name collection will have
        been flushed to disk - packing requires updating the name collection
        in synchronisation with certain steps. Otherwise the names collection
        is not flushed.

        :return: True if packing took place.
        """
        # XXX: Should not be needed when the management of indices is sane.
        total_revisions = self.revision_index.combined_index.key_count()
        total_packs = len(self._names)
        if self._max_pack_count(total_revisions) >= total_packs:
            return False
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Auto-packing repository %s, which has %d pack files, '
            'containing %d revisions into %d packs.', self, total_packs,
            total_revisions, self._max_pack_count(total_revisions))
        # determine which packs need changing
        pack_distribution = self.pack_distribution(total_revisions)
        existing_packs = []
        for pack in self.all_packs():
            revision_count = pack.get_revision_count()
            if revision_count == 0:
                # revision less packs are not generated by normal operation,
                # only by operations like sign-my-commits, and thus will not
                # tend to grow rapidly or without bound like commit containing
                # packs do - leave them alone as packing them really should
                # group their data with the relevant commit, and that may
                # involve rewriting ancient history - which autopack tries to
                # avoid. Alternatively we could not group the data but treat
                # each of these as having a single revision, and thus add
                # one revision for each to the total revision count, to get
                # a matching distribution.
                continue
            existing_packs.append((revision_count, pack))
        pack_operations = self.plan_autopack_combinations(
            existing_packs, pack_distribution)
        self._execute_pack_operations(pack_operations)
        return True

    def _execute_pack_operations(self, pack_operations, _packer_class=Packer):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: None.
        """
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            _packer_class(self, packs, '.autopack').pack()
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)

    def lock_names(self):
        """Acquire the mutex around the pack-names index.

        This cannot be used in the middle of a read-only transaction on the
        repository.
        """
        self.repo.control_files.lock_write()

    def pack(self):
        """Pack the pack collection totally."""
        self.ensure_loaded()
        total_packs = len(self._names)
        if total_packs < 2:
            # This is arguably wrong because we might not be optimal, but for
            # now let's leave it in. (e.g. reconcile -> one pack. But not
            # doing anything).
            return
        total_revisions = self.revision_index.combined_index.key_count()
        # XXX: the following may want to be a class, to pack with a given
        # policy.
        mutter('Packing repository %s, which has %d pack files, '
            'containing %d revisions into 1 pack.', self, total_packs,
            total_revisions)
        # determine which packs need changing
        pack_distribution = [1]
        pack_operations = [[0, []]]
        for pack in self.all_packs():
            pack_operations[-1][0] += pack.get_revision_count()
            pack_operations[-1][1].append(pack)
        self._execute_pack_operations(pack_operations, OptimisingPacker)

    def plan_autopack_combinations(self, existing_packs, pack_distribution):
        """Plan a pack operation.

        :param existing_packs: The packs to pack. (A list of (revcount, Pack)
            tuples).
        :param pack_distribution: A list with the number of revisions desired
            in each pack.
        """
        if len(existing_packs) <= len(pack_distribution):
            return []
        existing_packs.sort(reverse=True)
        pack_operations = [[0, []]]
        # plan out what packs to keep, and what to reorganise
        while len(existing_packs):
            # take the largest pack, and if it's less than the head of the
            # distribution chart we will include its contents in the new pack
            # for that position. If it's larger, we remove its size from the
            # distribution chart
            next_pack_rev_count, next_pack = existing_packs.pop(0)
            if next_pack_rev_count >= pack_distribution[0]:
                # this pack is already packed 'better' than this slot requires,
                # so we can not waste time packing it.
                while next_pack_rev_count > 0:
                    next_pack_rev_count -= pack_distribution[0]
                    if next_pack_rev_count >= 0:
                        # more to spare for other packs
                        del pack_distribution[0]
                    else:
                        # didn't use that entire bucket up
                        pack_distribution[0] = -next_pack_rev_count
            else:
                # add the revisions we're going to add to the next output pack
                pack_operations[-1][0] += next_pack_rev_count
                # allocate this pack to the next pack sub operation
                pack_operations[-1][1].append(next_pack)
                if pack_operations[-1][0] >= pack_distribution[0]:
                    # this pack is used up, shift left.
                    del pack_distribution[0]
                    pack_operations.append([0, []])
        return pack_operations
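
    # Illustrative note (not in the original module): a worked example of
    # the planning loop, with hypothetical packs A-D (sorted descending):
    #
    #   pack_distribution = [100, 10, 10, 1, 1]
    #   existing_packs    = [(80, A), (30, B), (10, C), (2, D)]
    #
    #   A (80)  < 100 : joins the current output pack      -> [80, [A]]
    #   B (30)        : joins too; 110 >= 100 shifts a bucket
    #                                         -> [[110, [A, B]], [0, []]]
    #   C (10) >= 10  : already packed to size; consumes the 10 bucket
    #   D (2)         : starts filling the next bucket     -> [2, [D]]
    #
    #   result: [[110, [A, B]], [2, [D]]]; empty entries are later
    #   skipped by _execute_pack_operations.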

    def ensure_loaded(self):
        # NB: if you see an assertion error here, it's probably access against
        # an unlocked repo. Naughty.
        if not self.repo.is_locked():
            raise errors.ObjectNotLocked(self.repo)
        if self._names is None:
            self._names = {}
            self._packs_at_load = set()
            for index, key, value in self._iter_disk_pack_index():
                name = key[0]
                self._names[name] = self._parse_index_sizes(value)
                self._packs_at_load.add((key, value))
        # populate all the metadata.
        self.all_packs()

    def _parse_index_sizes(self, value):
        """Parse a string of index sizes."""
        return tuple([int(digits) for digits in value.split(' ')])

    def get_pack_by_name(self, name):
        """Get a Pack object by name.

        :param name: The name of the pack - e.g. '123456'
        :return: A Pack object.
        """
        try:
            return self._packs_by_name[name]
        except KeyError:
            rev_index = self._make_index(name, '.rix')
            inv_index = self._make_index(name, '.iix')
            txt_index = self._make_index(name, '.tix')
            sig_index = self._make_index(name, '.six')
            result = ExistingPack(self._pack_transport, name, rev_index,
                inv_index, txt_index, sig_index)
            self.add_pack_to_memory(result)
            return result

    def allocate(self, a_new_pack):
        """Allocate name in the list of packs.

        :param a_new_pack: A NewPack instance to be added to the collection of
            packs for this repository.
        """
        self.ensure_loaded()
        if a_new_pack.name in self._names:
            raise errors.BzrError(
                'Pack %r already exists in %s' % (a_new_pack.name, self))
        self._names[a_new_pack.name] = tuple(a_new_pack.index_sizes)
        self.add_pack_to_memory(a_new_pack)

    def _iter_disk_pack_index(self):
        """Iterate over the contents of the pack-names index.

        This is used when loading the list from disk, and before writing to
        detect updates from others during our write operation.
        :return: An iterator of the index contents.
        """
        return GraphIndex(self.transport, 'pack-names', None
                ).iter_all_entries()

    def _make_index(self, name, suffix):
        size_offset = self._suffix_offsets[suffix]
        index_name = name + suffix
        index_size = self._names[name][size_offset]
        return GraphIndex(
            self._index_transport, index_name, index_size)

    def _max_pack_count(self, total_revisions):
        """Return the maximum number of packs to use for total revisions.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if not total_revisions:
            return 1
        digits = str(total_revisions)
        result = 0
        for digit in digits:
            result += int(digit)
        return result
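
    # Illustrative note (not in the original module): _max_pack_count is the
    # digit sum of the revision count. For example, 2431 revisions allow at
    # most 2 + 4 + 3 + 1 = 10 packs, which pairs with pack_distribution
    # below (two packs of 1000, four of 100, three of 10, one of 1).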

    def names(self):
        """Provide an order to the underlying names."""
        return sorted(self._names.keys())

    def _obsolete_packs(self, packs):
        """Move a number of packs which have been obsoleted out of the way.

        Each pack and its associated indices are moved out of the way.

        Note: for correctness this function should only be called after a new
        pack names index has been written without these pack names, and with
        the names of packs that contain the data previously available via these
        packs.

        :param packs: The packs to obsolete.
        :return: None.
        """
        for pack in packs:
            pack.pack_transport.rename(pack.file_name(),
                '../obsolete_packs/' + pack.file_name())
            # TODO: Probably needs to know all possible indices for this pack
            # - or maybe list the directory and move all indices matching this
            # name whether we recognize it or not?
            for suffix in ('.iix', '.six', '.tix', '.rix'):
                self._index_transport.rename(pack.name + suffix,
                    '../obsolete_packs/' + pack.name + suffix)

    def pack_distribution(self, total_revisions):
        """Generate a list of the number of revisions to put in each pack.

        :param total_revisions: The total number of revisions in the
            repository.
        """
        if total_revisions == 0:
            return [1]
        digits = reversed(str(total_revisions))
        result = []
        for exponent, count in enumerate(digits):
            size = 10 ** exponent
            for pos in range(int(count)):
                result.append(size)
        return list(reversed(result))
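
    # Illustrative note (not in the original module): pack_distribution
    # mirrors the decimal digits of the revision count, largest buckets
    # first:
    #
    #   pack_distribution(2431)
    #   # -> [1000, 1000, 100, 100, 100, 100, 10, 10, 10, 1]
    #
    # so a repository converges on roughly one pack per digit, and autopack
    # only fires once the on-disk pack count exceeds that digit sum.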

    def _pack_tuple(self, name):
        """Return a tuple with the transport and file name for a pack name."""
        return self._pack_transport, name + '.pack'

    def _remove_pack_from_memory(self, pack):
        """Remove pack from the packs accessed by this repository.

        Only affects memory state, until self._save_pack_names() is invoked.
        """
        self._names.pop(pack.name)
        self._packs_by_name.pop(pack.name)
        self._remove_pack_indices(pack)

    def _remove_pack_indices(self, pack):
        """Remove the indices for pack from the aggregated indices."""
        self.revision_index.remove_index(pack.revision_index, pack)
        self.inventory_index.remove_index(pack.inventory_index, pack)
        self.text_index.remove_index(pack.text_index, pack)
        self.signature_index.remove_index(pack.signature_index, pack)

    def reset(self):
        """Clear all cached data."""
        # cached revision data
        self.repo._revision_knit = None
        self.revision_index.clear()
        # cached signature data
        self.repo._signature_knit = None
        self.signature_index.clear()
        # cached file text data
        self.text_index.clear()
        self.repo._text_knit = None
        # cached inventory data
        self.inventory_index.clear()
        # remove the open pack
        self._new_pack = None
        # information about packs.
        self._names = None
        self.packs = []
        self._packs_by_name = {}
        self._packs_at_load = None

    def _make_index_map(self, index_suffix):
        """Return information on existing indices.

        :param index_suffix: Index suffix added to pack name.

        :returns: (pack_map, indices) where indices is a list of GraphIndex
            objects, and pack_map is a mapping from those objects to the
            pack tuple they describe.
        """
        # TODO: stop using this; it creates new indices unnecessarily.
        self.ensure_loaded()
        suffix_map = {'.rix': 'revision_index',
            '.six': 'signature_index',
            '.iix': 'inventory_index',
            '.tix': 'text_index',
            }
        return self._packs_list_to_pack_map_and_index_list(self.all_packs(),
            suffix_map[index_suffix])

    def _packs_list_to_pack_map_and_index_list(self, packs, index_attribute):
        """Convert a list of packs to an index pack map and index list.

        :param packs: The packs list to process.
        :param index_attribute: The attribute that the desired index is found
            on.
        :return: A tuple (map, list) where map contains the dict from
            index:pack_tuple, and list contains the indices in the same order
            as the packs list.
        """
        pack_map = {}
        indices = []
        for pack in packs:
            index = getattr(pack, index_attribute)
            indices.append(index)
            pack_map[index] = (pack.pack_transport, pack.file_name())
        return pack_map, indices

    def _index_contents(self, pack_map, key_filter=None):
        """Get an iterable of the index contents from a pack_map.

        :param pack_map: A map from indices to pack details.
        :param key_filter: An optional filter to limit the
            keys returned.
        """
        indices = [index for index in pack_map.iterkeys()]
        all_index = CombinedGraphIndex(indices)
        if key_filter is None:
            return all_index.iter_all_entries()
        else:
            return all_index.iter_entries(key_filter)

    def _unlock_names(self):
        """Release the mutex around the pack-names index."""
        self.repo.control_files.unlock()

    def _save_pack_names(self, clear_obsolete_packs=False):
        """Save the list of packs.

        This will take out the mutex around the pack names list for the
        duration of the method call. If concurrent updates have been made, a
        three-way merge between the current list and the current in memory list
        is performed.

        :param clear_obsolete_packs: If True, clear out the contents of the
            obsolete_packs directory.
        """
        self.lock_names()
        try:
            builder = GraphIndexBuilder()
            # load the disk nodes across
            disk_nodes = set()
            for index, key, value in self._iter_disk_pack_index():
                disk_nodes.add((key, value))
            # do a two-way diff against our original content
            current_nodes = set()
            for name, sizes in self._names.iteritems():
                current_nodes.add(
                    ((name, ), ' '.join(str(size) for size in sizes)))
            deleted_nodes = self._packs_at_load - current_nodes
            new_nodes = current_nodes - self._packs_at_load
            disk_nodes.difference_update(deleted_nodes)
            disk_nodes.update(new_nodes)
            # TODO: handle same-name, index-size-changes here -
            # e.g. use the value from disk, not ours, *unless* we're the one
            # changing it.
            for key, value in disk_nodes:
                builder.add_node(key, value)
            self.transport.put_file('pack-names', builder.finish(),
                mode=self.repo.bzrdir._get_file_mode())
            # move the baseline forward
            self._packs_at_load = disk_nodes
            if clear_obsolete_packs:
                self._clear_obsolete_packs()
        finally:
            self._unlock_names()
        # synchronise the memory packs list with what we just wrote:
        new_names = dict(disk_nodes)
        # drop no longer present nodes
        for pack in self.all_packs():
            if (pack.name,) not in new_names:
                self._remove_pack_from_memory(pack)
        # add new nodes/refresh existing ones
        for key, value in disk_nodes:
            name = key[0]
            sizes = self._parse_index_sizes(value)
            if name in self._names:
                # existing
                if sizes != self._names[name]:
                    # the pack for name has had its indices replaced - rare but
                    # important to handle. XXX: probably can never happen today
                    # because the three-way merge code above does not handle it
                    # - you may end up adding the same key twice to the new
                    # disk index because the set values are the same, unless
                    # the only index shows up as deleted by the set difference
                    # - which it may. Until there is a specific test for this,
                    # assume it's broken. RBC 20071017.
                    self._remove_pack_from_memory(self.get_pack_by_name(name))
                    self._names[name] = sizes
                    self.get_pack_by_name(name)
            else:
                # new
                self._names[name] = sizes
                self.get_pack_by_name(name)

    def _clear_obsolete_packs(self):
        """Delete everything from the obsolete-packs directory."""
        obsolete_pack_transport = self.transport.clone('obsolete_packs')
        for filename in obsolete_pack_transport.list_dir('.'):
            try:
                obsolete_pack_transport.delete(filename)
            except (errors.PathError, errors.TransportError), e:
                warning("couldn't delete obsolete pack, skipping it:\n%s"
                    % (e,))

    def _start_write_group(self):
        # Do not permit preparation for writing if we're not in a 'write lock'.
        if not self.repo.is_write_locked():
            raise errors.NotWriteLocked(self)
        self._new_pack = NewPack(self._upload_transport, self._index_transport,
            self._pack_transport, upload_suffix='.pack',
            file_mode=self.repo.bzrdir._get_file_mode())
        # allow writing: queue writes to a new index
        self.revision_index.add_writable_index(self._new_pack.revision_index,
            self._new_pack)
        self.inventory_index.add_writable_index(self._new_pack.inventory_index,
            self._new_pack)
        self.text_index.add_writable_index(self._new_pack.text_index,
            self._new_pack)
        self.signature_index.add_writable_index(self._new_pack.signature_index,
            self._new_pack)
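
        # Point the repository's VersionedFiles indices at this collection's
        # aggregate indices so that records added during the write group are
        # queued into the new pack's indices.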
        self.repo.inventories._index._add_callback = self.inventory_index.add_callback
        self.repo.revisions._index._add_callback = self.revision_index.add_callback
        self.repo.signatures._index._add_callback = self.signature_index.add_callback
        self.repo.texts._index._add_callback = self.text_index.add_callback

    def _abort_write_group(self):
        # FIXME: just drop the transient index.
        # forget what names there are
        if self._new_pack is not None:
            self._new_pack.abort()
            self._remove_pack_indices(self._new_pack)
            self._new_pack = None
        self.repo._text_knit = None

    def _commit_write_group(self):
        self._remove_pack_indices(self._new_pack)
        if self._new_pack.data_inserted():
            # get all the data to disk and ready to use
            self._new_pack.finish()
            self.allocate(self._new_pack)
            self._new_pack = None
            if not self.autopack():
                # when autopack takes no steps, the names list is still
                # unsaved.
                self._save_pack_names()
        else:
            self._new_pack.abort()
            self._new_pack = None
        self.repo._text_knit = None


class KnitPackRepository(KnitRepository):
    """Repository with knit objects stored inside pack containers.

    The layering for a KnitPackRepository is:

    Graph | HPSS | Repository public layer |
    ===================================================
    Tuple based apis below, string based, and key based apis above
    ---------------------------------------------------
    KnitVersionedFiles
      Provides .texts, .revisions etc
      This adapts the N-tuple keys to physical knit records which only have a
      single string identifier (for historical reasons), which in older formats
      was always the revision_id, and in the mapped code for packs is always
      the last element of key tuples.
    ---------------------------------------------------
    GraphIndex
      A separate GraphIndex is used for each of the
      texts/inventories/revisions/signatures contained within each individual
      pack file. The GraphIndex layer works in N-tuples and is unaware of any
      semantic value.
    ===================================================

    """

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
        _serializer):
        KnitRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        index_transport = self._transport.clone('indices')
        self._pack_collection = RepositoryPackCollection(self, self._transport,
            index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'))
        self.inventories = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.inventory_index.data_access,
            max_delta_chain=200)
        self.revisions = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                deltas=False, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.revision_index.data_access,
            max_delta_chain=0)
        self.signatures = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                deltas=False, parents=False, is_locked=self.is_locked),
            data_access=self._pack_collection.signature_index.data_access,
            max_delta_chain=0)
        self.texts = KnitVersionedFiles(
            _KnitGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                deltas=True, parents=True, is_locked=self.is_locked),
            data_access=self._pack_collection.text_index.data_access,
            max_delta_chain=200)
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _abort_write_group(self):
        self._pack_collection._abort_write_group()

    def _find_inconsistent_revision_parents(self):
        """Find revisions with incorrectly cached parents.

        :returns: an iterator yielding tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
        pb = ui.ui_factory.nested_progress_bar()
        result = []
        try:
            revision_nodes = self._pack_collection.revision_index \
                .combined_index.iter_all_entries()
            index_positions = []
            # Get the cached index values for all revisions, and also the
            # location in each index of the revision text so we can perform
            # linear IO.
            for index, key, value, refs in revision_nodes:
                pos, length = value[1:].split(' ')
                index_positions.append((index, int(pos), key[0],
                    tuple(parent[0] for parent in refs[0])))
                pb.update("Reading revision index.", 0, 0)
            index_positions.sort()
            batch_count = len(index_positions) / 1000 + 1
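            # Batch in groups of up to 1000 revisions; integer division plus
            # one covers a trailing partial batch (2500 entries -> batches of
            # 1000, 1000 and 500), and an exact multiple yields a final empty
            # batch, hence the break below.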
            pb.update("Checking cached revision graph.", 0, batch_count)
            for offset in xrange(batch_count):
                pb.update("Checking cached revision graph.", offset)
                to_query = index_positions[offset * 1000:(offset + 1) * 1000]
                if not to_query:
                    break
                rev_ids = [item[2] for item in to_query]
                revs = self.get_revisions(rev_ids)
                for revision, item in zip(revs, to_query):
                    index_parents = item[3]
                    rev_parents = tuple(revision.parent_ids)
                    if index_parents != rev_parents:
                        result.append((revision.revision_id, index_parents,
                            rev_parents))
        finally:
            pb.finished()
        return result

    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See graph._StackedParentsProvider.get_parents."""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, keys):
        """See graph._StackedParentsProvider.get_parent_map

        This implementation accesses the combined revision index to provide
        answers.
        """
        self._pack_collection.ensure_loaded()
        index = self._pack_collection.revision_index.combined_index
        keys = set(keys)
        if None in keys:
            raise ValueError('get_parent_map(None) is not valid')
        if _mod_revision.NULL_REVISION in keys:
            keys.discard(_mod_revision.NULL_REVISION)
            found_parents = {_mod_revision.NULL_REVISION:()}
        else:
            found_parents = {}
        search_keys = set((revision_id,) for revision_id in keys)
        for index, key, value, refs in index.iter_entries(search_keys):
            parents = refs[0]
            if not parents:
                parents = (_mod_revision.NULL_REVISION,)
            else:
                parents = tuple(parent[0] for parent in parents)
            found_parents[key[0]] = parents
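        # Typical result shape: {'rev-2': ('rev-1',)}; keys absent from the
        # revision index are omitted from the returned dict.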
        return found_parents

    def has_revisions(self, revision_ids):
        """See Repository.has_revisions()."""
        revision_ids = set(revision_ids)
        result = revision_ids.intersection(
            set([None, _mod_revision.NULL_REVISION]))
        revision_ids.difference_update(result)
        index = self._pack_collection.revision_index.combined_index
        keys = [(revision_id,) for revision_id in revision_ids]
        result.update(node[1][0] for node in index.iter_entries(keys))
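        # node[1] is the (revision_id,) key of each index hit; None and
        # NULL_REVISION were already answered above without touching the
        # index.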
        return result

    def _make_parents_provider(self):
        return graph.CachingParentsProvider(self)

    def _refresh_data(self):
        if self._write_lock_count == 1 or (
            self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'r'):
            # forget what names there are
            self._pack_collection.reset()
            # XXX: Better to do an in-memory merge when acquiring a new lock -
            # factor out code from _save_pack_names.
            self._pack_collection.ensure_loaded()

    def _start_write_group(self):
        self._pack_collection._start_write_group()

    def _commit_write_group(self):
        return self._pack_collection._commit_write_group()

    def get_transaction(self):
        if self._write_lock_count:
            return self._transaction
        else:
            return self.control_files.get_transaction()

    def is_locked(self):
        return self._write_lock_count or self.control_files.is_locked()

    def is_write_locked(self):
        return self._write_lock_count

    def lock_write(self, token=None):
        if not self._write_lock_count and self.is_locked():
            raise errors.ReadOnlyError(self)
        self._write_lock_count += 1
        if self._write_lock_count == 1:
            from bzrlib import transactions
            self._transaction = transactions.WriteTransaction()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
        self._refresh_data()

    def lock_read(self):
        if self._write_lock_count:
            self._write_lock_count += 1
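            # A read lock nested inside a write lock only bumps the in-process
            # count; the physical lock is not touched.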
        else:
            self.control_files.lock_read()
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
        self._refresh_data()

    def leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.leave_lock_in_place)

    def dont_leave_lock_in_place(self):
        # not supported - raise an error
        raise NotImplementedError(self.dont_leave_lock_in_place)

    @needs_write_lock
    def pack(self):
        """Compress the data within the repository.

        This will pack all the data to a single pack. In future it may
        recompress deltas or do other such expensive operations.
        """
        self._pack_collection.pack()

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import PackReconciler
        reconciler = PackReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def unlock(self):
        if self._write_lock_count == 1 and self._write_group is not None:
            self.abort_write_group()
            self._transaction = None
            self._write_lock_count = 0
            raise errors.BzrError(
                'Must end write group before releasing write lock on %s'
                % self)
        if self._write_lock_count:
            self._write_lock_count -= 1
            if not self._write_lock_count:
                transaction = self._transaction
                self._transaction = None
                transaction.finish()
                for repo in self._fallback_repositories:
                    repo.unlock()
        else:
            self.control_files.unlock()
            for repo in self._fallback_repositories:
                repo.unlock()


class RepositoryFormatPack(MetaDirRepositoryFormat):
    """Format logic for pack structured repositories.

    This repository format has:
     - a list of packs in pack-names
     - packs in packs/NAME.pack
     - indices in indices/NAME.{iix,six,tix,rix}
     - knit deltas in the packs, knit indices mapped to the indices.
     - thunk objects to support the knits programming API.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
     - a LockDir lock
    """

    # Set this attribute in derived classes to control the repository class
    # created by open and initialize.
    repository_class = None
    # Set this attribute in derived classes to control the
    # _commit_builder_class that the repository objects will have passed to
    # their constructor.
    _commit_builder_class = None
    # Set this attribute in derived classes to control the _serializer that the
    # repository objects will have passed to their constructor.
    _serializer = None
    # External references are not supported in pack repositories yet.
    supports_external_lookups = False

    def initialize(self, a_bzrdir, shared=False):
        """Create a pack based repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be initialized.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['indices', 'obsolete_packs', 'packs', 'upload']
        builder = GraphIndexBuilder()
        files = [('pack-names', builder.finish())]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        return self.open(a_bzrdir=a_bzrdir, _found=True)

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
            repository at a slightly different url
            than normal. I.e. during 'upgrade'.
        """
        if not _found:
            format = RepositoryFormat.find_format(a_bzrdir)
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = lockable_files.LockableFiles(repo_transport,
                                'lock', lockdir.LockDir)
        return self.repository_class(_format=self,
                              a_bzrdir=a_bzrdir,
                              control_files=control_files,
                              _commit_builder_class=self._commit_builder_class,
                              _serializer=self._serializer)
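
    # Illustrative flow: a concrete subclass such as RepositoryFormatKnitPack1
    # is instantiated, initialize(a_bzrdir) writes the directory skeleton and
    # an empty pack-names index, and open() then returns the configured
    # repository_class bound to those control files.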


class RepositoryFormatKnitPack1(RepositoryFormatPack):
    """A no-subtrees parameterized Pack repository.

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('pack-0.92')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits without subtree support"

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatKnitPack3(RepositoryFormatPack):
    """A subtrees parameterized Pack repository.

    This repository format uses the xml7 serializer to get:
     - support for recording full info about the tree root
     - support for recording tree-references

    This format was introduced in 0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    _serializer = xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'pack-0.92-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with subtree support\n"


class RepositoryFormatKnitPack4(RepositoryFormatPack):
    """A rich-root, no subtrees parameterized Pack repository.

    This repository format uses the xml6 serializer to get:
     - support for recording full info about the tree root

    This format was introduced in 1.0.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = False
    _serializer = xml6.serializer_v6

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'rich-root-pack')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar pack repository format 1 with rich root"
                " (needs bzr 1.0)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Packs containing knits with rich root support\n"


class RepositoryFormatPackDevelopment0(RepositoryFormatPack):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.0.

    No changes to the disk behaviour from pack-0.92.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackCommitBuilder
    _serializer = xml5.serializer_v5

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development0')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 0 (needs bzr.dev from before 1.3)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment0Subtree(RepositoryFormatPack):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.0.

    No changes to the disk behaviour from pack-0.92-subtree.
    """

    repository_class = KnitPackRepository
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    supports_tree_reference = True
    _serializer = xml7.serializer_v7

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development0-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 0 with subtree support "
            "(needs bzr.dev from before 1.3)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92-subtree\n")


class RepositoryFormatPackDevelopment1(RepositoryFormatPackDevelopment0):
    """A no-subtrees development repository.

    This format should be retained until the second release after bzr 1.5.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development1')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar development format 1 (needs bzr.dev from before 1.6)\n"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92 with external reference support.\n")

    def check_conversion_target(self, target_format):
        pass


class RepositoryFormatPackDevelopment1Subtree(RepositoryFormatPackDevelopment0Subtree):
    """A subtrees development repository.

    This format should be retained until the second release after bzr 1.5.

    Supports external lookups, which results in non-truncated ghosts after
    reconcile compared to pack-0.92 formats.
    """

    supports_external_lookups = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir(
            'development1-subtree')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if not getattr(target_format, 'supports_tree_reference', False):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ("Bazaar development format 1 with subtree support "
            "(needs bzr.dev from before 1.6)\n")

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format, currently the same as "
            "pack-0.92-subtree with external reference support.\n")