# Copyright (C) 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Repository formats using CHK inventories and groupcompress compression."""

import time

from bzrlib import (
    bzrdir,
    chk_map,
    chk_serializer,
    debug,
    errors,
    index as _mod_index,
    inventory,
    knit,
    osutils,
    pack,
    remote,
    revision as _mod_revision,
    trace,
    ui,
    )
from bzrlib.btree_index import (
    BTreeGraphIndex,
    BTreeBuilder,
    )
from bzrlib.groupcompress import (
    _GCGraphIndex,
    GroupCompressVersionedFiles,
    )
from bzrlib.repofmt.pack_repo import (
    Pack,
    NewPack,
    KnitPackRepository,
    KnitPackStreamSource,
    Packer,
    PackRootCommitBuilder,
    RepositoryPackCollection,
    RepositoryFormatPack,
    ResumedPack,
    )


class GCPack(NewPack):
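    """A pack file for a groupcompress repository.

    This behaves like NewPack, but (as noted in __init__ below) the inventory
    and text indices are built with a single reference list each.
    """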

    def __init__(self, pack_collection, upload_suffix='', file_mode=None):
        """Create a NewPack instance.

        :param pack_collection: A PackCollection into which this is being
            inserted.
        :param upload_suffix: An optional suffix to be given to any temporary
            files created during the pack creation. e.g. '.autopack'
        :param file_mode: An optional file mode to create the new files with.
        """
        # replaced from NewPack to:
        # - change inventory reference list length to 1
        # - change texts reference lists to 1
        # TODO: patch this to be parameterised

        # The relative locations of the packs are constrained, but all are
        # passed in because the caller has them, so as to avoid object churn.
        index_builder_class = pack_collection._index_builder_class
        if pack_collection.chk_index is not None:
            chk_index = index_builder_class(reference_lists=0)
        else:
            chk_index = None
        Pack.__init__(self,
            # Revisions: parents list, no text compression.
            index_builder_class(reference_lists=1),
            # Inventory: We want to map compression only, but currently the
            # knit code hasn't been updated enough to understand that, so we
            # have a regular 2-list index giving parents and compression
            # source.
            index_builder_class(reference_lists=1),
            # Texts: per file graph, for all fileids - so one reference list
            # and two elements in the key tuple.
            index_builder_class(reference_lists=1, key_elements=2),
            # Signatures: Just blobs to store, no compression, no parents.
            index_builder_class(reference_lists=0),
            # CHK based storage - just blobs, no compression or parents.
            chk_index=chk_index,
            )
        self._pack_collection = pack_collection
        # When we make readonly indices, we need this.
        self.index_class = pack_collection._index_class
        # where should the new pack be opened
        self.upload_transport = pack_collection._upload_transport
        # where are indices written out to
        self.index_transport = pack_collection._index_transport
        # where is the pack renamed to when it is finished?
        self.pack_transport = pack_collection._pack_transport
        # What file mode to upload the pack and indices with.
        self._file_mode = file_mode
        # tracks the content written to the .pack file.
        self._hash = osutils.md5()
        # a four-tuple with the length in bytes of the indices, once the pack
        # is finalised. (rev, inv, text, sigs)
        self.index_sizes = None
        # How much data to cache when writing packs. Note that this is not
        # synchronised with reads, because it's not in the transport layer, so
        # is not safe unless the client knows it won't be reading from the pack
        # under creation.
        self._cache_limit = 0
        # the temporary pack file name.
        self.random_name = osutils.rand_chars(20) + upload_suffix
        # when was this pack started ?
        self.start_time = time.time()
        # open an output stream for the data added to the pack.
        self.write_stream = self.upload_transport.open_write_stream(
            self.random_name, mode=self._file_mode)
        if 'pack' in debug.debug_flags:
            trace.mutter('%s: create_pack: pack stream open: %s%s t+%6.3fs',
                time.ctime(), self.upload_transport.base, self.random_name,
                time.time() - self.start_time)
        # A list of byte sequences to be written to the new pack, and the
        # aggregate size of them. Stored as a list rather than separate
        # variables so that the _write_data closure below can update them.
        self._buffer = [[], 0]
        # create a callable for adding data
        #
        # robertc says- this is a closure rather than a method on the object
        # so that the variables are locals, and faster than accessing object
        # members.
        def _write_data(bytes, flush=False, _buffer=self._buffer,
            _write=self.write_stream.write, _update=self._hash.update):
            _buffer[0].append(bytes)
            _buffer[1] += len(bytes)
            if _buffer[1] > self._cache_limit or flush:
                bytes = ''.join(_buffer[0])
                _write(bytes)
                _update(bytes)
                _buffer[:] = [[], 0]
        # expose this on self, for the occasion when clients want to add data.
        self._write_data = _write_data
        # a pack writer object to serialise pack records.
        self._writer = pack.ContainerWriter(self._write_data)
        self._writer.begin()
        # what state is the pack in? (open, finished, aborted)
        self._state = 'open'

    def _check_references(self):
        """Make sure our external references are present.

        Packs are allowed to have deltas whose base is not in the pack, but it
        must be present somewhere in this collection. It is not allowed to
        have deltas based on a fallback repository.
        (See <https://bugs.launchpad.net/bzr/+bug/288751>)
        """
        # Groupcompress packs don't have any external references; arguably CHK
        # pages have external references, but we cannot 'cheaply' determine
        # them without actually walking all of the chk pages.


class ResumedGCPack(ResumedPack):

    def _check_references(self):
        """Make sure our external compression parents are present."""
        # See GCPack._check_references for why this is empty

    def _get_external_refs(self, index):
        # GC repositories don't have compression parents external to a given
        # pack file, so there is nothing to look up.
        return set()


class GCCHKPacker(Packer):
    """This class understands what it takes to collect a GCCHK repo."""

    def __init__(self, pack_collection, packs, suffix, revision_ids=None,
                 reload_func=None):
        super(GCCHKPacker, self).__init__(pack_collection, packs, suffix,
                                          revision_ids=revision_ids,
                                          reload_func=reload_func)
        self._pack_collection = pack_collection
        # ATM, we only support this for GCCHK repositories
        if pack_collection.chk_index is None:
            raise AssertionError('pack_collection.chk_index should not be None')
        self._gather_text_refs = False
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        self._text_refs = None
        # set by .pack() if self.revision_ids is not None
        self.revision_keys = None
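
    # Helper used via _copy_stream() below: wraps a 'groupcompress'-ordered
    # record stream so that each record fetched also updates the progress bar.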
    def _get_progress_stream(self, source_vf, keys, message, pb):
        def pb_stream():
            substream = source_vf.get_record_stream(keys, 'groupcompress', True)
            for idx, record in enumerate(substream):
                if pb is not None:
                    pb.update(message, idx + 1, len(keys))
                yield record
        return pb_stream()

    def _get_filtered_inv_stream(self, source_vf, keys, message, pb=None):
        """Filter the texts of inventories, to find the chk pages."""
        total_keys = len(keys)
        def _filtered_inv_stream():
            id_roots_set = set()
            p_id_roots_set = set()
            stream = source_vf.get_record_stream(keys, 'groupcompress', True)
            for idx, record in enumerate(stream):
                # Inventories should always be with revisions; assume success.
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                             record.key)
                if pb is not None:
                    pb.update('inv', idx, total_keys)
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                if p_id_map is None:
                    raise AssertionError('Parent id -> file_id map not set')
                key = p_id_map.key()
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
                yield record
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            id_roots_set.clear()
            p_id_roots_set.clear()
        return _filtered_inv_stream()

    def _get_chk_streams(self, source_vf, keys, pb=None):
        # We want to stream the keys from 'id_roots', and things they
        # reference, and then stream things from p_id_roots and things they
        # reference, and then any remaining keys that we didn't get to.
        #
        # We also group referenced texts together, so if one root references a
        # text with prefix 'a', and another root references a node with prefix
        # 'a', we want to yield those nodes before we yield the nodes for 'b'.
        # This keeps 'similar' nodes together.
        #
        # Note: We probably actually want multiple streams here, to help the
        # client understand that the different levels won't compress well
        # against each other.
        # Test the difference between using one Group per level, and
        # using 1 Group per prefix. (so '' (root) would get a group, then
        # all the references to search-key 'a' would get a group, etc.)
        total_keys = len(keys)
        remaining_keys = set(keys)
        counter = [0]
        if self._gather_text_refs:
            bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
            self._text_refs = set()
        def _get_referenced_stream(root_keys, parse_leaf_nodes=False):
            cur_keys = root_keys
            while cur_keys:
                keys_by_search_prefix = {}
                remaining_keys.difference_update(cur_keys)
                next_keys = set()
                def handle_internal_node(node):
                    for prefix, value in node._items.iteritems():
                        # We don't want to request the same key twice, and we
                        # want to order it by the first time it is seen.
                        # Even further, we don't want to request a key which is
                        # not in this group of pack files (it should be in the
                        # repo, but it doesn't have to be in the group being
                        # packed.)
                        # TODO: consider how to treat externally referenced chk
                        #       pages as 'external_references' so that we
                        #       always fill them in for stacked branches
                        if value not in next_keys and value in remaining_keys:
                            keys_by_search_prefix.setdefault(prefix,
                                []).append(value)
                            next_keys.add(value)
                def handle_leaf_node(node):
                    # Store is None, because we know we have a LeafNode, and we
                    # just want its entries
                    for file_id, bytes in node.iteritems(None):
                        name_utf8, file_id, revision_id = bytes_to_info(bytes)
                        self._text_refs.add((file_id, revision_id))
                def next_stream():
                    stream = source_vf.get_record_stream(cur_keys,
                                                         'as-requested', True)
                    for record in stream:
                        if record.storage_kind == 'absent':
                            # An absent CHK record: we assume that the missing
                            # record is in a different pack - e.g. a page not
                            # altered by the commit we're packing.
                            continue
                        bytes = record.get_bytes_as('fulltext')
                        # We don't care about search_key_func for this code,
                        # because we only care about external references.
                        node = chk_map._deserialise(bytes, record.key,
                                                    search_key_func=None)
                        common_base = node._search_prefix
                        if isinstance(node, chk_map.InternalNode):
                            handle_internal_node(node)
                        elif parse_leaf_nodes:
                            handle_leaf_node(node)
                        counter[0] += 1
                        if pb is not None:
                            pb.update('chk node', counter[0], total_keys)
                        yield record
                yield next_stream()
                # Double check that we won't be emitting any keys twice
                # If we get rid of the pre-calculation of all keys, we could
                # turn this around and do
                # next_keys.difference_update(seen_keys)
                # However, we also may have references to chk pages in another
                # pack file during autopack. We filter earlier, so we should no
                # longer need to do this
                # next_keys = next_keys.intersection(remaining_keys)
                cur_keys = []
                for prefix in sorted(keys_by_search_prefix):
                    cur_keys.extend(keys_by_search_prefix.pop(prefix))
        for stream in _get_referenced_stream(self._chk_id_roots,
                                             self._gather_text_refs):
            yield stream
        del self._chk_id_roots
        # while it isn't really possible for chk_id_roots to not be in the
        # local group of packs, it is possible that the tree shape has not
        # changed recently, so we need to filter _chk_p_id_roots by the
        # available keys
        chk_p_id_roots = [key for key in self._chk_p_id_roots
                          if key in remaining_keys]
        del self._chk_p_id_roots
        for stream in _get_referenced_stream(chk_p_id_roots, False):
            yield stream
        if remaining_keys:
            trace.mutter('There were %d keys in the chk index, %d of which'
                         ' were not referenced', total_keys,
                         len(remaining_keys))
            if self.revision_ids is None:
                stream = source_vf.get_record_stream(remaining_keys,
                                                     'unordered', True)
                yield stream

    def _build_vf(self, index_name, parents, delta, for_write=False):
        """Build a VersionedFiles instance on top of this group of packs."""
        index_name = index_name + '_index'
        index_to_pack = {}
        access = knit._DirectPackAccess(index_to_pack)
        if for_write:
            # Use the new pack we are writing to.
            if self.new_pack is None:
                raise AssertionError('No new pack has been set')
            index = getattr(self.new_pack, index_name)
            index_to_pack[index] = self.new_pack.access_tuple()
            index.set_optimize(for_size=True)
            access.set_writer(self.new_pack._writer, index,
                              self.new_pack.access_tuple())
            add_callback = index.add_nodes
        else:
            indices = []
            for pack in self.packs:
                sub_index = getattr(pack, index_name)
                index_to_pack[sub_index] = pack.access_tuple()
                indices.append(sub_index)
            index = _mod_index.CombinedGraphIndex(indices)
            add_callback = None
        vf = GroupCompressVersionedFiles(
            _GCGraphIndex(index,
                add_callback=add_callback,
                parents=parents,
                is_locked=self._pack_collection.repo.is_locked),
            access=access,
            delta=delta)
        return vf

    def _build_vfs(self, index_name, parents, delta):
        """Build the source and target VersionedFiles."""
        source_vf = self._build_vf(index_name, parents,
                                   delta, for_write=False)
        target_vf = self._build_vf(index_name, parents,
                                   delta, for_write=True)
        return source_vf, target_vf

    def _copy_stream(self, source_vf, target_vf, keys, message, vf_to_stream,
                     pb_offset):
        trace.mutter('repacking %d %s', len(keys), message)
        self.pb.update('repacking %s' % (message,), pb_offset)
        child_pb = ui.ui_factory.nested_progress_bar()
        try:
            stream = vf_to_stream(source_vf, keys, message, child_pb)
            for _ in target_vf._insert_record_stream(stream,
                                                     random_id=True,
                                                     reuse_blocks=False):
                pass
        finally:
            child_pb.finished()

    def _copy_revision_texts(self):
        source_vf, target_vf = self._build_vfs('revision', True, False)
        if not self.revision_keys:
            # We are doing a full fetch, aka 'pack'
            self.revision_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'revisions', self._get_progress_stream, 1)

    def _copy_inventory_texts(self):
        source_vf, target_vf = self._build_vfs('inventory', True, True)
        # It is not sufficient to just use self.revision_keys, as stacked
        # repositories can have more inventories than they have revisions.
        # One alternative would be to do something with
        # get_parent_map(self.revision_keys), but that shouldn't be any faster
        # than this.
        inventory_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, inventory_keys,
                          'inventories', self._get_filtered_inv_stream, 2)

    def _copy_chk_texts(self):
        source_vf, target_vf = self._build_vfs('chk', False, False)
        # TODO: This is technically spurious... if it is a performance issue,
        #       remove it.
        total_keys = source_vf.keys()
        trace.mutter('repacking chk: %d id_to_entry roots,'
                     ' %d p_id_map roots, %d total keys',
                     len(self._chk_id_roots), len(self._chk_p_id_roots),
                     len(total_keys))
        self.pb.update('repacking chk', 3)
        child_pb = ui.ui_factory.nested_progress_bar()
        try:
            for stream in self._get_chk_streams(source_vf, total_keys,
                                                pb=child_pb):
                for _ in target_vf._insert_record_stream(stream,
                                                         random_id=True,
                                                         reuse_blocks=False):
                    pass
        finally:
            child_pb.finished()

    def _copy_text_texts(self):
        source_vf, target_vf = self._build_vfs('text', True, True)
        # XXX: We don't walk the chk map to determine referenced (file_id,
        # revision_id) keys. We don't do it yet because you really need
        # to filter out the ones that are present in the parents of the
        # rev just before the ones you are copying, otherwise the filter
        # is grabbing too many keys...
        text_keys = source_vf.keys()
        self._copy_stream(source_vf, target_vf, text_keys,
                          'texts', self._get_progress_stream, 4)

    def _copy_signature_texts(self):
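        # Only the signatures for the revisions being copied are interesting,
        # so the key set is narrowed to self.revision_keys before streaming.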
        source_vf, target_vf = self._build_vfs('signature', False, False)
        signature_keys = source_vf.keys()
        signature_keys = signature_keys.intersection(self.revision_keys)
        self._copy_stream(source_vf, target_vf, signature_keys,
                          'signatures', self._get_progress_stream, 5)

    def _create_pack_from_packs(self):
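        # The copy is driven index by index: revisions first, then the
        # inventories that name chk roots, the chk pages themselves, file
        # texts and finally signatures, all into a single new pack.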
        self.pb.update('repacking', 0, 7)
        self.new_pack = self.open_pack()
        # Is this necessary for GC ?
        self.new_pack.set_write_cache_size(1024*1024)
        self._copy_revision_texts()
        self._copy_inventory_texts()
        self._copy_chk_texts()
        self._copy_text_texts()
        self._copy_signature_texts()
        self.new_pack._check_references()
        if not self._use_pack(self.new_pack):
            self.new_pack.abort()
            return None
        self.pb.update('finishing repack', 6, 7)
        self.new_pack.finish()
        self._pack_collection.allocate(self.new_pack)
        return self.new_pack


class GCCHKReconcilePacker(GCCHKPacker):
    """A packer which regenerates indices etc as it copies.

    This is used by ``bzr reconcile`` to cause parent text pointers to be
    correct.
    """

    def __init__(self, *args, **kwargs):
        super(GCCHKReconcilePacker, self).__init__(*args, **kwargs)
        self._data_changed = False
        self._gather_text_refs = True

    def _copy_inventory_texts(self):
        source_vf, target_vf = self._build_vfs('inventory', True, True)
        self._copy_stream(source_vf, target_vf, self.revision_keys,
                          'inventories', self._get_filtered_inv_stream, 2)
        if source_vf.keys() != self.revision_keys:
            self._data_changed = True

    def _copy_text_texts(self):
        """Generate what texts we should have and then copy."""
        source_vf, target_vf = self._build_vfs('text', True, True)
        trace.mutter('repacking %d texts', len(self._text_refs))
        self.pb.update("repacking texts", 4)
        # we have three major tasks here:
        # 1) generate the ideal index
        repo = self._pack_collection.repo
        # We want the one we just wrote, so base it on self.new_pack
        revision_vf = self._build_vf('revision', True, False, for_write=True)
        ancestor_keys = revision_vf.get_parent_map(revision_vf.keys())
        # Strip keys back into revision_ids.
        ancestors = dict((k[0], tuple([p[0] for p in parents]))
                         for k, parents in ancestor_keys.iteritems())
        # TODO: _generate_text_key_index should be much cheaper to generate from
        #       a chk repository, rather than the current implementation
        ideal_index = repo._generate_text_key_index(None, ancestors)
        file_id_parent_map = source_vf.get_parent_map(self._text_refs)
        # 2) generate a keys list that contains all the entries that can
        #    be used as-is, with corrected parents.
        ok_keys = []
        new_parent_keys = {} # (key, parent_keys)
        discarded_keys = []
        NULL_REVISION = _mod_revision.NULL_REVISION
        for key in self._text_refs:
            try:
                ideal_parents = tuple(ideal_index[key])
            except KeyError:
                discarded_keys.append(key)
                self._data_changed = True
            else:
                if ideal_parents == (NULL_REVISION,):
                    ideal_parents = ()
                source_parents = file_id_parent_map[key]
                if ideal_parents == source_parents:
                    ok_keys.append(key)
                else:
                    # We need to change the parent graph, but we don't need to
                    # re-insert the text (since we don't pun the compression
                    # parent with the parents list)
                    self._data_changed = True
                    new_parent_keys[key] = ideal_parents
        # we're finished with some data.
        del file_id_parent_map
        # 3) bulk copy the data, updating records that need it
        def _update_parents_for_texts():
            stream = source_vf.get_record_stream(self._text_refs,
                                                 'groupcompress', False)
            for record in stream:
                if record.key in new_parent_keys:
                    record.parents = new_parent_keys[record.key]
                yield record
        target_vf.insert_record_stream(_update_parents_for_texts())

    def _use_pack(self, new_pack):
        """Override _use_pack to check for reconcile having changed content."""
        return new_pack.data_inserted() and self._data_changed


class GCRepositoryPackCollection(RepositoryPackCollection):
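    """A pack collection which creates GCPack packs and repacks with GCCHKPacker."""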

    pack_factory = GCPack
    resumed_pack_factory = ResumedGCPack

    def _execute_pack_operations(self, pack_operations,
                                 _packer_class=GCCHKPacker,
                                 reload_func=None):
        """Execute a series of pack operations.

        :param pack_operations: A list of [revision_count, packs_to_combine].
        :param _packer_class: The class of packer to use (default: GCCHKPacker).
        """
        # XXX: Copied across from RepositoryPackCollection simply because we
        # want to override the _packer_class ... :(
        for revision_count, packs in pack_operations:
            # we may have no-ops from the setup logic
            if len(packs) == 0:
                continue
            packer = GCCHKPacker(self, packs, '.autopack',
                                 reload_func=reload_func)
            try:
                packer.pack()
            except errors.RetryWithNewPacks:
                # An exception is propagating out of this context, make sure
                # this packer has cleaned up. Packer() doesn't set its new_pack
                # state into the RepositoryPackCollection object, so we only
                # have access to it directly here.
                if packer.new_pack is not None:
                    packer.new_pack.abort()
                raise
            for pack in packs:
                self._remove_pack_from_memory(pack)
        # record the newly available packs and stop advertising the old
        # packs.
        self._save_pack_names(clear_obsolete_packs=True)
        # Move the old packs out of the way now they are no longer referenced.
        for revision_count, packs in pack_operations:
            self._obsolete_packs(packs)


class CHKInventoryRepository(KnitPackRepository):
    """Subclass of KnitPackRepository that uses CHK based inventories."""

    def __init__(self, _format, a_bzrdir, control_files, _commit_builder_class,
                 _serializer):
        """Overridden to change pack collection class."""
        KnitPackRepository.__init__(self, _format, a_bzrdir, control_files,
            _commit_builder_class, _serializer)
        # and now replace everything it did :)
        index_transport = self._transport.clone('indices')
        self._pack_collection = GCRepositoryPackCollection(self,
            self._transport, index_transport,
            self._transport.clone('upload'),
            self._transport.clone('packs'),
            _format.index_builder_class,
            _format.index_class,
            use_chk_index=self._format.supports_chks,
            )
        self.inventories = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.inventory_index.combined_index,
                add_callback=self._pack_collection.inventory_index.add_callback,
                parents=True, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.inventory_index.data_access)
        self.revisions = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.revision_index.combined_index,
                add_callback=self._pack_collection.revision_index.add_callback,
                parents=True, is_locked=self.is_locked,
                track_external_parent_refs=True),
            access=self._pack_collection.revision_index.data_access,
            delta=False)
        self.signatures = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.signature_index.combined_index,
                add_callback=self._pack_collection.signature_index.add_callback,
                parents=False, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.signature_index.data_access,
            delta=False)
        self.texts = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.text_index.combined_index,
                add_callback=self._pack_collection.text_index.add_callback,
                parents=True, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.text_index.data_access)
        # No parents, individual CHK pages don't have specific ancestry
        self.chk_bytes = GroupCompressVersionedFiles(
            _GCGraphIndex(self._pack_collection.chk_index.combined_index,
                add_callback=self._pack_collection.chk_index.add_callback,
                parents=False, is_locked=self.is_locked,
                inconsistency_fatal=False),
            access=self._pack_collection.chk_index.data_access)
        search_key_name = self._format._serializer.search_key_name
        search_key_func = chk_map.search_key_registry.get(search_key_name)
        self.chk_bytes._search_key_func = search_key_func
        # True when the repository object is 'write locked' (as opposed to the
        # physical lock only taken out around changes to the pack-names list.)
        # Another way to represent this would be a decorator around the control
        # files object that presents logical locks as physical ones - if this
        # gets ugly consider that alternative design. RBC 20071011
        self._write_lock_count = 0
        self._transaction = None
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = True
        self._reconcile_backsup_inventory = False

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        serializer = self._format._serializer
        result = inventory.CHKInventory.from_inventory(self.chk_bytes, inv,
            maximum_size=serializer.maximum_size,
            search_key_name=serializer.search_key_name)
        inv_lines = result.to_lines()
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def _create_inv_from_null(self, delta, revision_id):
        """This will mutate new_inv directly.

        This is a simplified form of create_by_apply_delta which knows that all
        the old values must be None, so everything is a create.
        """
        serializer = self._format._serializer
        new_inv = inventory.CHKInventory(serializer.search_key_name)
        new_inv.revision_id = revision_id
        entry_to_bytes = new_inv._entry_to_bytes
        id_to_entry_dict = {}
        parent_id_basename_dict = {}
        for old_path, new_path, file_id, entry in delta:
            if old_path is not None:
                raise ValueError('Invalid delta, somebody tried to delete %r'
                                 ' from the NULL_REVISION'
                                 % ((old_path, file_id),))
            if new_path is None:
                raise ValueError('Invalid delta, delta from NULL_REVISION has'
                                 ' no new_path %r' % (file_id,))
            if new_path == '':
                new_inv.root_id = file_id
                parent_id_basename_key = ('', '')
            else:
                utf8_entry_name = entry.name.encode('utf-8')
                parent_id_basename_key = (entry.parent_id, utf8_entry_name)
            new_value = entry_to_bytes(entry)
            # new_inv._path_to_fileid_cache[new_path] = file_id
            id_to_entry_dict[(file_id,)] = new_value
            parent_id_basename_dict[parent_id_basename_key] = file_id

        new_inv._populate_from_dicts(self.chk_bytes, id_to_entry_dict,
            parent_id_basename_dict, maximum_size=serializer.maximum_size)
        return new_inv

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against.
        :param delta: The inventory delta (see Inventory.apply_delta for
            details.)
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = None
        if basis_inv is None:
            if basis_revision_id == _mod_revision.NULL_REVISION:
                new_inv = self._create_inv_from_null(delta, new_revision_id)
                inv_lines = new_inv.to_lines()
                return self._inventory_add_lines(new_revision_id, parents,
                    inv_lines, check_content=False), new_inv
            else:
                basis_tree = self.revision_tree(basis_revision_id)
                basis_tree.lock_read()
                basis_inv = basis_tree.inventory
        try:
            result = basis_inv.create_by_apply_delta(delta, new_revision_id,
                propagate_caches=propagate_caches)
            inv_lines = result.to_lines()
            return self._inventory_add_lines(new_revision_id, parents,
                inv_lines, check_content=False), result
        finally:
            if basis_tree is not None:
                basis_tree.unlock()

    def deserialise_inventory(self, revision_id, bytes):
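        """Deserialise bytes (a serialised CHKInventory) for revision_id."""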
        return inventory.CHKInventory.deserialise(self.chk_bytes, bytes,
            (revision_id,))

    def _iter_inventories(self, revision_ids):
        """Iterate over many inventory objects."""
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        texts = {}
        for record in stream:
            if record.storage_kind != 'absent':
                texts[record.key] = record.get_bytes_as('fulltext')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            yield inventory.CHKInventory.deserialise(self.chk_bytes, texts[key], key)

    def _iter_inventory_xmls(self, revision_ids):
        # Without a native 'xml' inventory, this method doesn't make sense, so
        # make it raise to trap naughty direct users.
        raise NotImplementedError(self._iter_inventory_xmls)

    def _find_present_inventory_keys(self, revision_keys):
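        """Return the subset of revision_keys whose inventories are present.

        Keys for revisions whose inventory is not in this repository (for
        example ghost parents) are silently dropped.
        """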
        parent_map = self.inventories.get_parent_map(revision_keys)
        present_inventory_keys = set(k for k in parent_map)
        return present_inventory_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids
            that altered it listed explicitly.
        """
        rich_root = self.supports_rich_root()
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        file_id_revisions = {}
        pb = ui.ui_factory.nested_progress_bar()
        try:
            revision_keys = [(r,) for r in revision_ids]
            parent_keys = self._find_parent_keys_of_revisions(revision_keys)
            # TODO: instead of using _find_present_inventory_keys, change the
            # code paths to allow missing inventories to be tolerated.
            # However, we only want to tolerate missing parent
            # inventories, not missing inventories for revision_ids
            present_parent_inv_keys = self._find_present_inventory_keys(
                parent_keys)
            present_parent_inv_ids = set(
                [k[-1] for k in present_parent_inv_keys])
            uninteresting_root_keys = set()
            interesting_root_keys = set()
            inventories_to_read = set(revision_ids)
            inventories_to_read.update(present_parent_inv_ids)
            for inv in self.iter_inventories(inventories_to_read):
                entry_chk_root_key = inv.id_to_entry.key()
                if inv.revision_id in present_parent_inv_ids:
                    uninteresting_root_keys.add(entry_chk_root_key)
                else:
                    interesting_root_keys.add(entry_chk_root_key)

            chk_bytes = self.chk_bytes
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        interesting_root_keys, uninteresting_root_keys,
                        pb=pb):
                for name, bytes in items:
                    (name_utf8, file_id, revision_id) = bytes_to_info(bytes)
                    if not rich_root and name_utf8 == '':
                        continue
                    try:
                        file_id_revisions[file_id].add(revision_id)
                    except KeyError:
                        file_id_revisions[file_id] = set([revision_id])
        finally:
            pb.finished()
        return file_id_revisions

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        # XXX: Slow version but correct: rewrite as a series of delta
        # examinations/direct tree traversal. Note that that will require care
        # as a common node is reachable both from the inventory that added it,
        # and others afterwards.
        revision_keys = self.revisions.keys()
        result = {}
        rich_roots = self.supports_rich_root()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            all_revs = self.all_revision_ids()
            total = len(all_revs)
            for pos, inv in enumerate(self.iter_inventories(all_revs)):
                pb.update("Finding text references", pos, total)
                for _, entry in inv.iter_entries():
                    if not rich_roots and entry.file_id == inv.root_id:
                        continue
                    key = (entry.file_id, entry.revision)
                    result.setdefault(key, False)
                    if entry.revision == inv.revision_id:
                        result[key] = True
            return result
        finally:
            pb.finished()

    def _reconcile_pack(self, collection, packs, extension, revs, pb):
        packer = GCCHKReconcilePacker(collection, packs, extension)
        return packer.pack(pb)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        if isinstance(to_format, remote.RemoteRepositoryFormat):
            # Can't just check attributes on to_format with the current code,
            # so work around it.
            to_format._ensure_real()
            to_format = to_format._custom_format
        if to_format.__class__ is self._format.__class__:
            # We must be exactly the same format, otherwise stuff like the chk
            # page layout might be different.
            return GroupCHKStreamSource(self, to_format)
        return super(CHKInventoryRepository, self)._get_source(to_format)


class GroupCHKStreamSource(KnitPackStreamSource):
    """Used when both the source and target repo are GroupCHK repos."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        super(GroupCHKStreamSource, self).__init__(from_repository, to_format)
        self._revision_keys = None
        self._text_keys = None
        self._text_fetch_order = 'groupcompress'
        self._chk_id_roots = None
        self._chk_p_id_roots = None

    def _get_inventory_stream(self, inventory_keys, allow_absent=False):
        """Get a stream of inventory texts.

        When this function returns, self._chk_id_roots and self._chk_p_id_roots
        have been reset; they are (re)populated while the returned stream is
        consumed.
        """
        self._chk_id_roots = []
        self._chk_p_id_roots = []
        def _filtered_inv_stream():
            id_roots_set = set()
            p_id_roots_set = set()
            source_vf = self.from_repository.inventories
            stream = source_vf.get_record_stream(inventory_keys,
                                                 'groupcompress', True)
            for record in stream:
                if record.storage_kind == 'absent':
                    if allow_absent:
                        continue
                    else:
                        raise errors.NoSuchRevision(self, record.key)
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(None, bytes,
                                                             record.key)
                key = chk_inv.id_to_entry.key()
                if key not in id_roots_set:
                    self._chk_id_roots.append(key)
                    id_roots_set.add(key)
                p_id_map = chk_inv.parent_id_basename_to_file_id
                if p_id_map is None:
                    raise AssertionError('Parent id -> file_id map not set')
                key = p_id_map.key()
                if key not in p_id_roots_set:
                    p_id_roots_set.add(key)
                    self._chk_p_id_roots.append(key)
                yield record
            # We have finished processing all of the inventory records, we
            # don't need these sets anymore
            id_roots_set.clear()
            p_id_roots_set.clear()
        return ('inventories', _filtered_inv_stream())

    def _get_filtered_chk_streams(self, excluded_revision_keys):
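        # Yield 'chk_bytes' streams for the id_to_entry and
        # parent_id_basename_to_file_id maps rooted at the keys collected by
        # _get_inventory_stream, excluding pages already reachable from the
        # inventories of excluded_revision_keys.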
        self._text_keys = set()
        excluded_revision_keys.discard(_mod_revision.NULL_REVISION)
        if not excluded_revision_keys:
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
        else:
            # filter out any excluded revisions whose inventories are not
            # actually present
            # TODO: Update Repository.iter_inventories() to add
            #       ignore_missing=True
            present_keys = self.from_repository._find_present_inventory_keys(
                excluded_revision_keys)
            present_ids = [k[-1] for k in present_keys]
            uninteresting_root_keys = set()
            uninteresting_pid_root_keys = set()
            for inv in self.from_repository.iter_inventories(present_ids):
                uninteresting_root_keys.add(inv.id_to_entry.key())
                uninteresting_pid_root_keys.add(
                    inv.parent_id_basename_to_file_id.key())
        bytes_to_info = inventory.CHKInventory._bytes_to_utf8name_key
        chk_bytes = self.from_repository.chk_bytes
        def _filter_id_to_entry():
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_id_roots, uninteresting_root_keys):
                for name, bytes in items:
                    # Note: we don't care about name_utf8, because we are always
                    # rich-root.
                    _, file_id, revision_id = bytes_to_info(bytes)
                    self._text_keys.add((file_id, revision_id))
                if record is not None:
                    yield record
            # Consumed
            self._chk_id_roots = None
        yield 'chk_bytes', _filter_id_to_entry()
        def _get_parent_id_basename_to_file_id_pages():
            for record, items in chk_map.iter_interesting_nodes(chk_bytes,
                        self._chk_p_id_roots, uninteresting_pid_root_keys):
                if record is not None:
                    yield record
            # Consumed
            self._chk_p_id_roots = None
        yield 'chk_bytes', _get_parent_id_basename_to_file_id_pages()

    def get_stream(self, search):
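        # The overall fetch order: revision texts first, then inventories
        # (which also record the chk roots), then the chk pages those
        # inventories reference, and finally the file texts named by them.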
        revision_ids = search.get_keys()
        for stream_info in self._fetch_revision_texts(revision_ids):
            yield stream_info
        self._revision_keys = [(rev_id,) for rev_id in revision_ids]
        yield self._get_inventory_stream(self._revision_keys)
        # TODO: The keys to exclude might be part of the search recipe
        # For now, exclude all parents that are at the edge of ancestry, for
        # which we have inventories
        from_repo = self.from_repository
        parent_keys = from_repo._find_parent_keys_of_revisions(
            self._revision_keys)
        for stream_info in self._get_filtered_chk_streams(parent_keys):
            yield stream_info
        yield self._get_text_stream()

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        missing_inventory_keys = set()
        for key in missing_keys:
            if key[0] != 'inventories':
                raise AssertionError('The only missing keys we should'
                    ' be filling in are inventory keys, not %s'
                    % (key[0],))
            missing_inventory_keys.add(key[1:])
        if self._chk_id_roots or self._chk_p_id_roots:
            raise AssertionError('Cannot call get_stream_for_missing_keys'
                ' until all of get_stream() has been consumed.')
        # Yield the inventory stream, so we can find the chk stream
        # Some of the missing_keys will be missing because they are ghosts.
        # As such, we can ignore them. The Sink is required to verify there are
        # no unavailable texts when the ghost inventories are not filled in.
        yield self._get_inventory_stream(missing_inventory_keys,
                                         allow_absent=True)
        # We use the empty set for excluded_revision_keys, to make it clear
        # that we want to transmit all referenced chk pages.
        for stream_info in self._get_filtered_chk_streams(set()):
            yield stream_info


class RepositoryFormatCHK1(RepositoryFormatPack):
    """A hashed CHK+group compress pack repository."""

    repository_class = CHKInventoryRepository
    supports_external_lookups = True
    supports_chks = True
    # For right now, setting this to True gives us InterModel1And2 rather
    # than InterDifferingSerializer
    _commit_builder_class = PackRootCommitBuilder
    rich_root_data = True
    _serializer = chk_serializer.chk_serializer_255_bigpage
    _commit_inv_deltas = True
    # What index classes to use
    index_builder_class = BTreeBuilder
    index_class = BTreeGraphIndex
    # Note: We cannot unpack a delta that references a text we haven't
    # seen yet. There are 2 options, work in fulltexts, or require
    # topological sorting. Using fulltexts is more optimal for local
    # operations, because the source can be smart about extracting
    # multiple in-a-row (and sharing strings). Topological is better
    # for remote, because we access less data.
    _fetch_order = 'unordered'
    _fetch_uses_deltas = False # essentially ignored by the groupcompress code.

    pack_compresses = True

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development6-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - group compression and chk inventory'
                ' (needs bzr.dev from 1.14)\n')

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return ("Development repository format - rich roots, group compression"
                " and chk inventories")

    def check_conversion_target(self, target_format):
        if not target_format.rich_root_data:
            raise errors.BadConversionTarget(
                'Does not support rich root data.', target_format)
        if (self.supports_tree_reference and
            not getattr(target_format, 'supports_tree_reference', False)):
            raise errors.BadConversionTarget(
                'Does not support nested trees', target_format)


class RepositoryFormatCHK2(RepositoryFormatCHK1):
    """A CHK repository that uses the bencode revision serializer."""

    _serializer = chk_serializer.chk_bencode_serializer

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('development7-rich-root')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return ('Bazaar development format - chk repository with bencode '
                'revision serialization (needs bzr.dev from 1.16)\n')


class RepositoryFormat2a(RepositoryFormatCHK2):
    """A CHK repository that uses the bencode revision serializer.

    This is the same as RepositoryFormatCHK2 but with a public name.
    """

    _serializer = chk_serializer.chk_bencode_serializer

    def _get_matching_bzrdir(self):
        return bzrdir.format_registry.make_bzrdir('2a')

    def _ignore_setting_bzrdir(self, format):
        pass

    _matchingbzrdir = property(_get_matching_bzrdir, _ignore_setting_bzrdir)

    def get_format_string(self):
        return ('Bazaar repository format 2a (needs bzr 1.16 or later)\n')