# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Repository formats using CHK inventories and groupcompress compression."""
import time

from bzrlib import (
    bzrdir,
    chk_map,
    chk_serializer,
    debug,
    errors,
    index as _mod_index,
    inventory,
    knit,
    osutils,
    pack,
    revision as _mod_revision,
    trace,
    ui,
    )
from bzrlib.btree_index import (
    BTreeGraphIndex,
    BTreeBuilder,
    )
from bzrlib.groupcompress import (
    _GCGraphIndex,
    GroupCompressVersionedFiles,
    )
from bzrlib.repofmt.pack_repo import (
    Pack,
    NewPack,
    KnitPackRepository,
    KnitPackStreamSource,
    PackRootCommitBuilder,
    RepositoryPackCollection,
    RepositoryFormatPack,
    ResumedPack,
    Packer,
    )

class GCPack(NewPack):

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        """
        # replaced from NewPack to:
        # - change inventory reference list length to 1
        # - change texts reference lists to 1
        # TODO: patch this to be parameterised

        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=1),
            # Texts: per file graph, for all fileids - so one reference list
            # and two elements in the key tuple.
            index_builder_class(reference_lists=1, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents
            # list.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index,
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            trace.mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them. Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            # buffer cap
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
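        # (Data flow, for orientation: self._writer serialises records through
        # _write_data, which buffers up to _cache_limit bytes before writing
        # the joined bytes to write_stream and folding them into self._hash,
        # so the pack's md5 always matches what reached the transport.)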
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'
        # no name until we finish writing the content
        self.name = None

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        # Groupcompress packs don't have any external references, arguably CHK
        # pages have external references, but we cannot 'cheaply' determine
        # them without actually walking all of the chk pages.


class ResumedGCPack(ResumedPack):

    def _check_references(self):
        """Make sure our external compression parents are present."""
        # See GCPack._check_references for why this is empty

    def _get_external_refs(self, index):
        # GC repositories don't have compression parents external to a given
        # pack file
        return set()


class GCCHKPacker(Packer):
    """This class understands what it takes to collect a GCCHK repo."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        super(GCCHKPacker, self).__init__(pack_collection, packs, suffix,
                                          revision_ids=revision_ids,
                                          reload_func=reload_func)
        self._pack_collection = pack_collection
        # ATM, we only support this for GCCHK repositories
        if pack_collection.chk_index is None:
            raise AssertionError('pack_collection.chk_index should not be None')
        self._gather_text_refs = False
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        self._text_refs = None
        # set by .pack() if self.revision_ids is not None
        self.revision_keys = None

    def _get_progress_stream(self, source_vf, keys, message, pb):
        def pb_stream():
            substream = source_vf.get_record_stream(keys, 'groupcompress', True)
            for idx, record in enumerate(substream):
                if pb is not None:
                    pb.update(message, idx + 1, len(keys))
                yield record
        return pb_stream()
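    # (_get_progress_stream is the vf_to_stream callable handed to
    # _copy_stream for the revision, text and signature copies below.)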

    def _get_filtered_inv_stream(self, source_vf, keys, message, pb=None):
        """Filter the texts of inventories, to find the chk pages."""
        total_keys = len(keys)
        def _filtered_inv_stream():
            id_roots_set = set()
            p_id_roots_set = set()
            stream = source_vf.get_record_stream(keys, 'groupcompress', True)
            for idx, record in enumerate(stream):
                # Inventories should always be with revisions; assume success.
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                             record.key)
                if pb is not None:
                    pb.update('inv', idx, total_keys)
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                if p_id_map is None:
                    raise AssertionError('Parent id -> file_id map not set')
                key = p_id_map.key()
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
                yield record
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            id_roots_set.clear()
            p_id_roots_set.clear()
        return _filtered_inv_stream()
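    # (Besides yielding the inventory records, the stream above records the
    # root keys of each inventory's id_to_entry and
    # parent_id_basename_to_file_id maps in self._chk_id_roots /
    # self._chk_p_id_roots, which _get_chk_streams then uses as its starting
    # points.)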

    def _get_chk_streams(self, source_vf, keys, pb=None):
        # We want to stream the keys from 'id_roots', and things they
        # reference, and then stream things from p_id_roots and things they
        # reference, and then any remaining keys that we didn't get to.

        # We also group referenced texts together, so if one root references a
        # text with prefix 'a', and another root references a node with prefix
        # 'a', we want to yield those nodes before we yield the nodes for 'b'
        # This keeps 'similar' nodes together.

        # Note: We probably actually want multiple streams here, to help the
        #       client understand that the different levels won't compress well
        #       against each other.
        #       Test the difference between using one Group per level, and
        #       using 1 Group per prefix. (so '' (root) would get a group, then
        #       all the references to search-key 'a' would get a group, etc.)
        total_keys = len(keys)
        remaining_keys = set(keys)
        counter = [0]
        if self._gather_text_refs:
            bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
            self._text_refs = set()
        def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
            cur_keys = root_keys
            while cur_keys:
                keys_by_search_prefix = {}
                remaining_keys.difference_update(cur_keys)
                next_keys = set()
                def handle_internal_node(node):
                    for prefix, value in node._items.iteritems():
                        # We don't want to request the same key twice, and we
                        # want to order it by the first time it is seen.
                        # Even further, we don't want to request a key which is
                        # not in this group of pack files (it should be in the
                        # repo, but it doesn't have to be in the group being
                        # packed.)
                        # TODO: consider how to treat externally referenced chk
                        #       pages as 'external_references' so that we
                        #       always fill them in for stacked branches
                        if value not in next_keys and value in remaining_keys:
                            keys_by_search_prefix.setdefault(prefix,
                                []).append(value)
                            next_keys.add(value)
                def handle_leaf_node(node):
                    # Store is None, because we know we have a LeafNode, and we
                    # just want its entries
                    for file_id, bytes in node.iteritems(None):
                        name_utf8, file_id, revision_id = bytes_to_info(bytes)
                        self._text_refs.add((file_id, revision_id))
                def next_stream():
                    stream = source_vf.get_record_stream(cur_keys,
                        'as-requested', True)
                    for record in stream:
                        if record.storage_kind == 'absent':
                            # An absent CHK record: we assume that the missing
                            # record is in a different pack - e.g. a page not
                            # altered by the commit we're packing.
                            continue
                        bytes = record.get_bytes_as('fulltext')
                        # We don't care about search_key_func for this code,
                        # because we only care about external references.
                        node = chk_map._deserialise(bytes, record.key,
                                                    search_key_func=None)
                        common_base = node._search_prefix
                        if isinstance(node, chk_map.InternalNode):
                            handle_internal_node(node)
                        elif parse_leaf_nodes:
                            handle_leaf_node(node)
                        counter[0] += 1
                        if pb is not None:
                            pb.update('chk node', counter[0], total_keys)
                        yield record
                yield next_stream()
                # Double check that we won't be emitting any keys twice
                # If we get rid of the pre-calculation of all keys, we could
                # turn this around and do
                # next_keys.difference_update(seen_keys)
                # However, we also may have references to chk pages in another
                # pack file during autopack. We filter earlier, so we should no
                # longer need to do this
                # next_keys = next_keys.intersection(remaining_keys)
                cur_keys = []
                for prefix in sorted(keys_by_search_prefix):
                    cur_keys.extend(keys_by_search_prefix.pop(prefix))
        for stream in _get_referenced_stream(self._chk_id_roots,
                                             self._gather_text_refs):
            yield stream
        del self._chk_id_roots
        # while it isn't really possible for chk_id_roots to not be in the
        # local group of packs, it is possible that the tree shape has not
        # changed recently, so we need to filter _chk_p_id_roots by the
        # available keys
        chk_p_id_roots = [key for key in self._chk_p_id_roots
                          if key in remaining_keys]
        del self._chk_p_id_roots
        for stream in _get_referenced_stream(chk_p_id_roots, False):
            yield stream
        if remaining_keys:
            trace.mutter('There were %d keys in the chk index, %d of which'
                         ' were not referenced', total_keys,
                         len(remaining_keys))
            if self.revision_ids is None:
                stream = source_vf.get_record_stream(remaining_keys,
                                                     'unordered', True)
                yield stream
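    # (Emission order, for reference: the id_to_entry trees come first, walked
    # breadth-first and grouped by search prefix, then the
    # parent_id,basename->file_id trees, then - when packing everything -
    # any chk pages that were never referenced.)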

    def _build_vf(self, index_name, parents, delta, for_write=False):
        """Build a VersionedFiles instance on top of this group of packs."""
        index_name = index_name + '_index'
        index_to_pack = {}
        access = knit._DirectPackAccess(index_to_pack)
        if for_write:
            # Use new_pack
            if self.new_pack is None:
                raise AssertionError('No new pack has been set')
            index = getattr(self.new_pack, index_name)
            index_to_pack[index] = self.new_pack.access_tuple()
            index.set_optimize(for_size=True)
            access.set_writer(self.new_pack._writer, index,
                              self.new_pack.access_tuple())
            add_callback = index.add_nodes
        else:
            indices = []
            for pack in self.packs:
                sub_index = getattr(pack, index_name)
                index_to_pack[sub_index] = pack.access_tuple()
                indices.append(sub_index)
            index = _mod_index.CombinedGraphIndex(indices)
            add_callback = None
        vf = GroupCompressVersionedFiles(
            _GCGraphIndex(index,
                          add_callback=add_callback,
                          parents=parents,
                          is_locked=self._pack_collection.repo.is_locked),
            access=access,
            delta=delta)
        return vf

    def _build_vfs(self, index_name, parents, delta):
        """Build the source and target VersionedFiles."""
        source_vf = self._build_vf(index_name, parents,
                                   delta, for_write=False)
        target_vf = self._build_vf(index_name, parents,
                                   delta, for_write=True)
        return source_vf, target_vf
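    # (source_vf reads via the combined indices of the packs being repacked;
    # target_vf writes through self.new_pack's writer.)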

    def _copy_stream(self, source_vf, target_vf, keys, message, vf_to_stream,
                     pb_offset):
        trace.mutter('repacking %d %s', len(keys), message)
        self.pb.update('repacking %s' % (message,), pb_offset)
        child_pb = ui.ui_factory.nested_progress_bar()
        try:
            stream = vf_to_stream(source_vf, keys, message, child_pb)
            for _ in target_vf._insert_record_stream(stream,
                                                     random_id=True,
                                                     reuse_blocks=False):
                pass
        finally:
            child_pb.finished()

    def _copy_revision_texts(self):
        source_vf, target_vf = self._build_vfs('revision', True, False)
        if not self.revision_keys:
            # We are doing a full fetch, aka 'pack'
            self.revision_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'revisions', self._get_progress_stream, 1)

    def _copy_inventory_texts(self):
        source_vf, target_vf = self._build_vfs('inventory', True, True)
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'inventories', self._get_filtered_inv_stream, 2)

    def _copy_chk_texts(self):
        source_vf, target_vf = self._build_vfs('chk', False, False)
        # TODO: This is technically spurious... if it is a performance issue,
        #       remove it
        total_keys = source_vf.keys()
        trace.mutter('repacking chk: %d id_to_entry roots,'
                     ' %d p_id_map roots, %d total keys',
                     len(self._chk_id_roots), len(self._chk_p_id_roots),
                     len(total_keys))
        self.pb.update('repacking chk', 3)
        child_pb = ui.ui_factory.nested_progress_bar()
        try:
            for stream in self._get_chk_streams(source_vf, total_keys,
                                                pb=child_pb):
                for _ in target_vf._insert_record_stream(stream,
                                                         random_id=True,
                                                         reuse_blocks=False):
                    pass
        finally:
            child_pb.finished()

    def _copy_text_texts(self):
        source_vf, target_vf = self._build_vfs('text', True, True)
        # XXX: We don't walk the chk map to determine referenced (file_id,
        #      revision_id) keys. We don't do it yet because you really need
        #      to filter out the ones that are present in the parents of the
        #      rev just before the ones you are copying, otherwise the filter
        #      is grabbing too many keys...
        text_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, text_keys,
                          'texts', self._get_progress_stream, 4)

    def _copy_signature_texts(self):
        source_vf, target_vf = self._build_vfs('signature', False, False)
        signature_keys = source_vf.keys()
        signature_keys = signature_keys.intersection(self.revision_keys)
        self._copy_stream(source_vf, target_vf, signature_keys,
                          'signatures', self._get_progress_stream, 5)
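    # (The assignment above matters: set.intersection returns a new set
    # rather than mutating in place, so the result must be captured for the
    # filtering to take effect.)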

    def _create_pack_from_packs(self):
        self.pb.update('repacking', 0, 7)
        self.new_pack = self.open_pack()
        # Is this necessary for GC ?
        self.new_pack.set_write_cache_size(1024*1024)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_chk_texts()
        self._copy_text_texts()
        self._copy_signature_texts()
        self.new_pack._check_references()
        if not self._use_pack(self.new_pack):
            self.new_pack.abort()
            return None
        self.new_pack.finish_content()
        if len(self.packs) == 1:
            old_pack = self.packs[0]
            if old_pack.name == self.new_pack._hash.hexdigest():
                # The single old pack was already optimally packed.
                trace.mutter('single pack %s was already optimally packed',
                             old_pack.name)
                self.new_pack.abort()
                return None
        self.pb.update('finishing repack', 6, 7)
        self.new_pack.finish()
        self._pack_collection.allocate(self.new_pack)
        return self.new_pack
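    # (As with the base Packer, returning None tells the caller that no new
    # pack was created.)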


class GCCHKReconcilePacker(GCCHKPacker):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    regenerated.
    """

    def __init__(self, *args, **kwargs):
        super(GCCHKReconcilePacker, self).__init__(*args, **kwargs)
        self._data_changed = False
        self._gather_text_refs = True

    def _copy_inventory_texts(self):
        source_vf, target_vf = self._build_vfs('inventory', True, True)
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'inventories', self._get_filtered_inv_stream, 2)
        if source_vf.keys() != self.revision_keys:
            self._data_changed = True

    def _copy_text_texts(self):
        """generate what texts we should have and then copy."""
        source_vf, target_vf = self._build_vfs('text', True, True)
        trace.mutter('repacking %d texts', len(self._text_refs))
        self.pb.update("repacking texts", 4)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        # We want the one we just wrote, so base it on self.new_pack
        revision_vf = self._build_vf('revision', True, False, for_write=True)
        ancestor_keys = revision_vf.get_parent_map(revision_vf.keys())
        # Strip keys back into revision_ids.
        ancestors = dict((k[0], tuple([p[0] for p in parents]))
                         for k, parents in ancestor_keys.iteritems())
        del ancestor_keys
        # TODO: _generate_text_key_index should be much cheaper to generate from
        #       a chk repository, rather than the current implementation
        ideal_index = repo._generate_text_key_index(None, ancestors)
        file_id_parent_map = source_vf.get_parent_map(self._text_refs)
        # 2) generate a keys list that contains all the entries that can
        #    be used as-is, with corrected parents.
        ok_keys = []
        new_parent_keys = {} # (key, parent_keys)
        discarded_keys = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        for key in self._text_refs:
            try:
                ideal_parents = tuple(ideal_index[key])
            except KeyError:
                discarded_keys.append(key)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                source_parents = file_id_parent_map[key]
                if ideal_parents == source_parents:
                    # no change needed.
                    ok_keys.append(key)
                else:
                    # We need to change the parent graph, but we don't need to
                    # re-insert the text (since we don't pun the compression
                    # parent with the parents list)
                    self._data_changed = True
                    new_parent_keys[key] = ideal_parents
        # we're finished with some data.
        del ideal_index
        del file_id_parent_map
        # 3) bulk copy the data, updating records that need it
        def _update_parents_for_texts():
            stream = source_vf.get_record_stream(self._text_refs,
                'groupcompress', False)
            for record in stream:
                if record.key in new_parent_keys:
                    record.parents = new_parent_keys[record.key]
                yield record
        target_vf.insert_record_stream(_update_parents_for_texts())

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        return new_pack.data_inserted() and self._data_changed


class GCRepositoryPackCollection(RepositoryPackCollection):

    pack_factory = GCPack
    resumed_pack_factory = ResumedGCPack

    def _execute_pack_operations(self, pack_operations,
                                 _packer_class=GCCHKPacker,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: Packer).
        :return: None.
        """
        # XXX: Copied across from RepositoryPackCollection simply because we
        #      want to override the _packer_class ... :(
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = GCCHKPacker(self, packs, '.autopack',
                                 reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs
        self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)
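    # (Note the ordering above: _save_pack_names runs before _obsolete_packs,
    # so the pack-names file stops advertising the old packs before their
    # files are moved aside.)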


class CHKInventoryRepository(KnitPackRepository):
    """subclass of KnitPackRepository that uses CHK based inventories."""

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
                 _serializer):
        """Overridden to change pack collection class."""
        KnitPackRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        # and now replace everything it did :)
        index_transport = self._transport.clone('indices')
        self._pack_collection = GCRepositoryPackCollection(self,
            self._transport, index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class,
            use_chk_index=self._format.supports_chks,
            )
        self.inventories = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                parents=True, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.inventory_index.data_access)
        self.revisions = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                parents=True, is_locked=self.is_locked,
                track_external_parent_refs=True),
            access=self._pack_collection.revision_index.data_access,
            delta=False)
        self.signatures = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                parents=False, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.signature_index.data_access,
            delta=False)
        self.texts = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                parents=True, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.text_index.data_access)
        # No parents, individual CHK pages don't have specific ancestry
        self.chk_bytes = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.chk_index.combined_index,
                add_callback=self._pack_collection.chk_index.add_callback,
                parents=False, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.chk_index.data_access)
        search_key_name = self._format._serializer.search_key_name
        search_key_func = chk_map.search_key_registry.get(search_key_name)
        self.chk_bytes._search_key_func = search_key_func
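        # (The serializer only names the search key to use; the registry maps
        # that name to the actual key function, so chk page layout follows
        # the repository format.)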
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        # make inventory
        serializer = self._format._serializer
        result = inventory.CHKInventory.from_inventory(self.chk_bytes, inv,
            maximum_size=serializer.maximum_size,
            search_key_name=serializer.search_key_name)
        inv_lines = result.to_lines()
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def _create_inv_from_null(self, delta, revision_id):
        """This will mutate new_inv directly.

        This is a simplified form of create_by_apply_delta which knows that all
        the old values must be None, so everything is a create.
        """
        serializer = self._format._serializer
        new_inv = inventory.CHKInventory(serializer.search_key_name)
        new_inv.revision_id = revision_id
        entry_to_bytes = new_inv._entry_to_bytes
        id_to_entry_dict = {}
        parent_id_basename_dict = {}
        for old_path, new_path, file_id, entry in delta:
            if old_path is not None:
                raise ValueError('Invalid delta, somebody tried to delete %r'
                                 ' from the NULL_REVISION'
                                 % ((old_path, file_id),))
            if new_path is None:
                raise ValueError('Invalid delta, delta from NULL_REVISION has'
                                 ' no new_path %r' % (file_id,))
            if new_path == '':
                new_inv.root_id = file_id
                parent_id_basename_key = ('', '')
            else:
                utf8_entry_name = entry.name.encode('utf-8')
                parent_id_basename_key = (entry.parent_id, utf8_entry_name)
            new_value = entry_to_bytes(entry)
            # Populate Caches?
            # new_inv._path_to_fileid_cache[new_path] = file_id
            id_to_entry_dict[(file_id,)] = new_value
            parent_id_basename_dict[parent_id_basename_key] = file_id

        new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
            parent_id_basename_dict, maximum_size=serializer.maximum_size)
        return new_inv

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against.
        :param delta: The inventory delta (see Inventory.apply_delta for
            details)
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator(which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = None
        if basis_inv is None:
            if basis_revision_id == _mod_revision.NULL_REVISION:
                new_inv = self._create_inv_from_null(delta, new_revision_id)
                inv_lines = new_inv.to_lines()
                return self._inventory_add_lines(new_revision_id, parents,
                    inv_lines, check_content=False), new_inv
            else:
                basis_tree = self.revision_tree(basis_revision_id)
                basis_tree.lock_read()
                basis_inv = basis_tree.inventory
        try:
            result = basis_inv.create_by_apply_delta(delta, new_revision_id,
                propagate_caches=propagate_caches)
            inv_lines = result.to_lines()
            return self._inventory_add_lines(new_revision_id, parents,
                inv_lines, check_content=False), result
        finally:
            if basis_tree is not None:
                basis_tree.unlock()

    def deserialise_inventory(self, revision_id, bytes):
        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
            (revision_id,))

    def _iter_inventories(self, revision_ids, ordering):
        """Iterate over many inventory objects."""
        if ordering is None:
            ordering = 'unordered'
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, ordering, True)
        texts = {}
        for record in stream:
            if record.storage_kind != 'absent':
                texts[record.key] = record.get_bytes_as('fulltext')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)

    def _iter_inventory_xmls(self, revision_ids, ordering):
        # Without a native 'xml' inventory, this method doesn't make sense, so
        # make it raise to trap naughty direct users.
        raise NotImplementedError(self._iter_inventory_xmls)

    def _find_present_inventory_keys(self, revision_keys):
        parent_map = self.inventories.get_parent_map(revision_keys)
        present_inventory_keys = set(k for k in parent_map)
        return present_inventory_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        rich_root = self.supports_rich_root()
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        file_id_revisions = {}
        pb = ui.ui_factory.nested_progress_bar()
        try:
            revision_keys = [(r,) for r in revision_ids]
            parent_keys = self._find_parent_keys_of_revisions(revision_keys)
            # TODO: instead of using _find_present_inventory_keys, change the
            #       code paths to allow missing inventories to be tolerated.
            #       However, we only want to tolerate missing parent
            #       inventories, not missing inventories for revision_ids
            present_parent_inv_keys = self._find_present_inventory_keys(
                                        parent_keys)
            present_parent_inv_ids = set(
                [k[-1] for k in present_parent_inv_keys])
            uninteresting_root_keys = set()
            interesting_root_keys = set()
            inventories_to_read = set(revision_ids)
            inventories_to_read.update(present_parent_inv_ids)
            for inv in self.iter_inventories(inventories_to_read):
                entry_chk_root_key = inv.id_to_entry.key()
                if inv.revision_id in present_parent_inv_ids:
                    uninteresting_root_keys.add(entry_chk_root_key)
                else:
                    interesting_root_keys.add(entry_chk_root_key)
            chk_bytes = self.chk_bytes
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        interesting_root_keys, uninteresting_root_keys,
                        pb=pb):
                for name, bytes in items:
                    (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
                    if not rich_root and name_utf8 == '':
                        continue
                    try:
                        file_id_revisions[file_id].add(revision_id)
                    except KeyError:
                        file_id_revisions[file_id] = set([revision_id])
        finally:
            pb.finished()
        return file_id_revisions

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        # XXX: Slow version but correct: rewrite as a series of delta
        # examinations/direct tree traversal. Note that that will require care
        # as a common node is reachable both from the inventory that added it,
        # and others afterwards.
        revision_keys = self.revisions.keys()
        result = {}
        rich_roots = self.supports_rich_root()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            all_revs = self.all_revision_ids()
            total = len(all_revs)
            for pos, inv in enumerate(self.iter_inventories(all_revs)):
                pb.update("Finding text references", pos, total)
                for _, entry in inv.iter_entries():
                    if not rich_roots and entry.file_id == inv.root_id:
                        continue
                    key = (entry.file_id, entry.revision)
                    result.setdefault(key, False)
                    if entry.revision == inv.revision_id:
                        result[key] = True
            return result
        finally:
            pb.finished()

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        packer = GCCHKReconcilePacker(collection, packs, extension)
        return packer.pack(pb)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        if self._format._serializer == to_format._serializer:
            # We must be exactly the same format, otherwise stuff like the chk
            # page layout might be different.
            # Actually, this test is just slightly looser than exact so that
            # CHK2 <-> 2a transfers will work.
            return GroupCHKStreamSource(self, to_format)
        return super(CHKInventoryRepository, self)._get_source(to_format)


class GroupCHKStreamSource(KnitPackStreamSource):
    """Used when both the source and target repo are GroupCHK repos."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
        self._revision_keys = None
        self._text_keys = None
        self._text_fetch_order = 'groupcompress'
        self._chk_id_roots = None
        self._chk_p_id_roots = None

    def _get_inventory_stream(self, inventory_keys, allow_absent=False):
        """Get a stream of inventory texts.

        When this function returns, self._chk_id_roots and self._chk_p_id_roots
        will be initialized to the results of the stream.
        """
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        def _filtered_inv_stream():
            id_roots_set = set()
            p_id_roots_set = set()
            source_vf = self.from_repository.inventories
            stream = source_vf.get_record_stream(inventory_keys,
                                                 'groupcompress', True)
            for record in stream:
                if record.storage_kind == 'absent':
                    if allow_absent:
                        continue
                    else:
                        raise errors.NoSuchRevision(self, record.key)
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                             record.key)
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                if p_id_map is None:
                    raise AssertionError('Parent id -> file_id map not set')
                key = p_id_map.key()
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
                yield record
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            id_roots_set.clear()
            p_id_roots_set.clear()
        return ('inventories', _filtered_inv_stream())

    def _get_filtered_chk_streams(self, excluded_revision_keys):
        self._text_keys = set()
        excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
        if not excluded_revision_keys:
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
        else:
            # filter out any excluded revisions whose inventories are not
            # actually present
            # TODO: Update Repository.iter_inventories() to add
            #       ignore_missing=True
            present_keys = self.from_repository._find_present_inventory_keys(
                excluded_revision_keys)
            present_ids = [k[-1] for k in present_keys]
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
            for inv in self.from_repository.iter_inventories(present_ids):
                uninteresting_root_keys.add(inv.id_to_entry.key())
                uninteresting_pid_root_keys.add(
                    inv.parent_id_basename_to_file_id.key())
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        chk_bytes = self.from_repository.chk_bytes
        def _filter_id_to_entry():
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_id_roots, uninteresting_root_keys):
                for name, bytes in items:
                    # Note: we don't care about name_utf8, because we are always
                    # rich-root = True
                    _, file_id, revision_id = bytes_to_info(bytes)
                    self._text_keys.add((file_id, revision_id))
                if record is not None:
                    yield record
            # Consumed
            self._chk_id_roots = None
        yield 'chk_bytes', _filter_id_to_entry()
        def _get_parent_id_basename_to_file_id_pages():
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_p_id_roots, uninteresting_pid_root_keys):
                if record is not None:
                    yield record
            # Consumed
            self._chk_p_id_roots = None
        yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()

    def get_stream(self, search):
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_inventory_stream(self._revision_keys)
        # TODO: The keys to exclude might be part of the search recipe
        # For now, exclude all parents that are at the edge of ancestry, for
        # which we have inventories
        from_repo = self.from_repository
        parent_keys = from_repo._find_parent_keys_of_revisions(
            self._revision_keys)
        for stream_info in self._get_filtered_chk_streams(parent_keys):
            yield stream_info
        yield self._get_text_stream()
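    # (Overall substream order for a fetch: revision texts, inventories, the
    # chk pages reachable from those inventories minus the ones reachable
    # from the excluded edge parents, and finally file texts.)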

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        missing_inventory_keys = set()
        for key in missing_keys:
            if key[0] != 'inventories':
                raise AssertionError('The only missing keys we should'
                    ' be filling in are inventory keys, not %s'
                    % (key[0],))
            missing_inventory_keys.add(key[1:])
        if self._chk_id_roots or self._chk_p_id_roots:
            raise AssertionError('Cannot call get_stream_for_missing_keys'
                ' until all of get_stream() has been consumed.')
        # Yield the inventory stream, so we can find the chk stream
        # Some of the missing_keys will be missing because they are ghosts.
        # As such, we can ignore them. The Sink is required to verify there are
        # no unavailable texts when the ghost inventories are not filled in.
        yield self._get_inventory_stream(missing_inventory_keys,
                                         allow_absent=True)
        # We use the empty set for excluded_revision_keys, to make it clear
        # that we want to transmit all referenced chk pages.
        for stream_info in self._get_filtered_chk_streams(set()):
            yield stream_info


class RepositoryFormatCHK1(RepositoryFormatPack):
    """A hashed CHK+group compress pack repository."""

    repository_class = CHKInventoryRepository
    supports_external_lookups = True
    supports_chks = True
    # For right now, setting this to True gives us InterModel1And2 rather
    # than InterDifferingSerializer
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    _serializer = chk_serializer.chk_serializer_255_bigpage
    _commit_inv_deltas = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex
    # Note: We cannot unpack a delta that references a text we haven't
    # seen yet. There are 2 options, work in fulltexts, or require
    # topological sorting. Using fulltexts is more optimal for local
    # operations, because the source can be smart about extracting
    # multiple in-a-row (and sharing strings). Topological is better
    # for remote, because we access less data.
    _fetch_order = 'unordered'
    _fetch_uses_deltas = False # essentially ignored by the groupcompress code.

    pack_compresses = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development6-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - group compression and chk inventory'
                ' (needs bzr.dev from 1.14)\n')

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format - rich roots, group compression"
                " and chk inventories")

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if (self.supports_tree_reference and
            not getattr(target_format, 'supports_tree_reference', False)):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)


class RepositoryFormatCHK2(RepositoryFormatCHK1):
    """A CHK repository that uses the bencode revision serializer."""

    _serializer = chk_serializer.chk_bencode_serializer

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development7-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - chk repository with bencode '
                'revision serialization (needs bzr.dev from 1.16)\n')


class RepositoryFormat2a(RepositoryFormatCHK2):
    """A CHK repository that uses the bencode revision serializer.

    This is the same as RepositoryFormatCHK2 but with a public name.
    """

    _serializer = chk_serializer.chk_bencode_serializer

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('2a')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')
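

# A minimal usage sketch (hypothetical path; assumes bzrlib is importable and
# that '2a' is registered in bzrdir.format_registry):
#
#   from bzrlib import bzrdir
#   format = bzrdir.format_registry.make_bzrdir('2a')
#   tree = bzrdir.BzrDir.create_standalone_workingtree('/tmp/repo-2a',
#                                                      format=format)
#
# would create a standalone working tree whose repository uses
# RepositoryFormat2a above.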