# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import time

from bzrlib import (
    bzrdir,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    osutils,
    revision as _mod_revision,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib import registry
from bzrlib.symbol_versioning import (
    deprecated_method,
    )
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
            timestamp=self._timestamp,
            timezone=self._timezone,
            committer=self._committer,
            message=message,
            inventory_sha1=self.inv_sha1,
            revision_id=self._new_revision_id,
            properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
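
    # Illustrative sketch of the expected calling sequence; `repo`, `branch`,
    # `parents` and `config` are hypothetical placeholders:
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   try:
    #       ...  # record entries via record_entry_contents() or
    #            # record_iter_changes()
    #       builder.finish_inventory()
    #       rev_id = builder.commit('message')
    #   except:
    #       builder.abort()
    #       raise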

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            # add_inventory_by_delta returns (validator, new_inv); keep the
            # sha1 validator.
            self.inv_sha1 = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)[0]
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents)
        return self._new_revision_id

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository cannot support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)
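
    # For example, with hypothetical revision ids: if revision_ids is
    # ['rev-A', 'rev-B'] and rev-A is an ancestor of rev-B, this returns
    # set(['rev-B']); two unrelated candidates are both returned, which is
    # the forked-graph case callers must handle.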

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # the new revision_id.
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta
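
    # Each accumulated delta item has the (old_path, new_path, file_id,
    # new_entry) shape; illustrative values:
    #
    #   (None, 'foo.c', 'file-foo', entry)    - added
    #   ('foo.c', 'bar.c', 'file-foo', entry) - renamed and/or modified
    #   ('foo.c', None, 'file-foo', None)     - deleted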

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
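
    # Sketch of the delete-recording protocol, with hypothetical values:
    #
    #   builder.will_record_deletes()
    #   builder.record_delete('doomed.txt', 'doomed-id')
    #   delta = builder.get_basis_delta()
    #   # delta now includes ('doomed.txt', None, 'doomed-id', None)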

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r)." % (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                lines = file_obj.readlines()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
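
    # The three return shapes of record_entry_contents, illustratively:
    #
    #   (delta, True, (sha1, stat_value)) - new file content was recorded
    #   (delta, False, None)              - entry carried over unchanged
    #   (None, False, None)               - skipped; nothing to record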

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory delta's
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.

        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]

        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            # changed_content, versioned, parent, name, kind,
            # executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of
                        # the merge to determine if it was changed after the
                        # merge or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(
                        file_id, change[1][1])
                    try:
                        lines = file_obj.readlines()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, lines, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], [], heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], [], heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    entry.reference_revision = \
                        tree.get_reference_revision(change[0])
                    if (carry_over_possible and
                        parent_entry.reference_revision ==
                            entry.reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], [], heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        # Note: as we read the content directly from the tree, we know it's not
        # been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this is
        # not a guarantee of safety. What would be better is always checking
        # the content during test suite execution. RBC 20070912
        parent_keys = tuple((file_id, parent) for parent in parents)
        return self.repository.texts.add_lines(
            (file_id, self._new_revision_id), parent_keys, new_lines,
            nostore_sha=nostore_sha, random_id=self.random_revid,
            check_content=False)[0:2]
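
    # For reference, texts are keyed by two-tuples: the text recorded here
    # gets the key (file_id, self._new_revision_id), and its per-file graph
    # parents are keys like (file_id, parent_revision_id).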


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id."""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


######################################################################
# Repositories


class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories, texts and chk_bytes attributes) and a
    Transport, which respectively provide byte storage and a means to access
    the (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
    byte string made up of a hash identifier and a hash value.

    We use this interface because it allows low friction with the underlying
    code that implements disk indices, network encoding and other parts of
    bzrlib.
    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures. The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
        any data the repository chooses to store or have indexed by its hash.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )
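
    # Illustrative fragment the regex is aimed at (the exact XML shape is
    # format specific): an inventory entry serialised as
    #   <file file_id="foo-id" name="foo" revision="rev-1"/>
    # yields the groups file_id='foo-id' and revision_id='rev-1'.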

    def abort_write_group(self, suppress_errors=False):
        """Abort the contents accrued within the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate. Defaults to False.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError(
                'mismatched lock context and write group. %r, %r' %
                (self._write_group, self.get_transaction()))
        try:
            self._abort_write_group()
        except Exception, exc:
            self._write_group = None
            if not suppress_errors:
                raise
            mutter('abort_write_group failed')
            log_exception_quietly()
            note('bzr: ERROR (ignored): %s', exc)
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
            is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details.)
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
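
    # Sketch with hypothetical ids, inside a write group:
    #
    #   delta = [(None, 'new.txt', 'new-file-id', new_entry)]
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       'basis-rev-id', delta, 'new-rev-id', ['basis-rev-id'])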

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        return self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
            up in the inventory store.
        :param config: If None no digital signature will be created.
            If supplied its signature_needed method will be used
            to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present. There is no direct replacement
        method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        ascertain.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        #
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        locked = self.is_locked()
        result = self.control_files.lock_write(token=token)
        for repo in self._fallback_repositories:
            # Writes don't affect fallback repos
            repo.lock_read()
        if not locked:
            self._refresh_data()
        return result

    def lock_read(self):
        locked = self.is_locked()
        self.control_files.lock_read()
        for repo in self._fallback_repositories:
            repo.lock_read()
        if not locked:
            self._refresh_data()
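
    # Locks and write groups pair up like this (illustrative only):
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           ...  # insert data
    #       except:
    #           repo.abort_write_group()
    #           raise
    #       else:
    #           repo.commit_write_group()
    #   finally:
    #       repo.unlock()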

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
            # result['size'] = t
        return result

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    @needs_read_lock
    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def get_missing_parent_inventories(self):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        key_deps = self.revisions._index._key_dependencies
        key_deps.add_keys(present_inventories)
        parents = set(self.revisions._index.get_missing_parents())
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        mutter('parents: %r', parents)
        referrers = key_deps.get_referrers()
        mutter('referrers: %r', referrers)
        file_ids = self.fileids_altered_by_revision_ids(r[0] for r in referrers)
        mutter('file_ids altered by: %r', file_ids)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        mutter('missing texts: %r', missing_texts)
        if not missing_texts:
            # no texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy. This
            allows copying multiple heads at once. Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        if self.has_same_location(source) and fetch_spec is None:
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
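
    # Typical calls, for illustration with a hypothetical 'rev-id':
    #
    #   target.fetch(source, revision_id='rev-id')  # one head and ancestry
    #   target.fetch(source)                        # all content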

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        if self.control_files._lock_count == 0:
            self._inventory_entry_cache.clear()
            for repo in self._fallback_repositories:
                repo.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
1617
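    # Illustrative sketch (not part of the original module): the write group
    # life cycle around data insertion, assuming 'repo' is write-locked:
    #
    #   repo.start_write_group()
    #   try:
    #       ... insert data, e.g. repo.texts.add_lines(...) ...
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()
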
    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result

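    # Illustrative sketch (not part of the original module): probing for
    # several revisions at once is cheaper than calling has_revision() in a
    # loop, assuming 'repo' is read-locked:
    #
    #   wanted = set(['rev-1', 'rev-2', 'rev-3'])
    #   present = repo.has_revisions(wanted)
    #   missing = wanted - present
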
    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        revs = {}
        for record in stream:
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self, record.key[0])
            text = record.get_bytes_as('fulltext')
            rev = self._serializer.read_revision_from_string(text)
            revs[record.key[0]] = rev
        return [revs[revid] for revid in revision_ids]

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r],
            specific_fileids=specific_fileids))[0]

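    # Illustrative sketch (not part of the original module): listing the
    # paths added by a revision, assuming 'repo' is read-locked and
    # 'rev_id' names a revision it contains:
    #
    #   delta = repo.get_revision_delta(rev_id)
    #   for path, file_id, kind in delta.added:
    #       print path
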
    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            # RBC 20071114.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_ids):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_ids: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision id from each parsed line will be looked up in the
            revision_ids filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys())
        # Note that revision_ids are revision keys.
        parent_maps = self.revisions.get_parent_map(revision_ids)
        parents = set()
        map(parents.update, parent_maps.itervalues())
        parents.difference_update(revision_ids)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parents)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')

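    # Illustrative sketch (not part of the original module): reading two
    # file texts in one pass, assuming 'repo' is read-locked and the
    # (file_id, revision_id) pairs exist in the repository:
    #
    #   wanted = [('file-a', 'rev-1', 'first'), ('file-b', 'rev-2', 'second')]
    #   for identifier, bytes_iter in repo.iter_files_bytes(wanted):
    #       text = ''.join(bytes_iter)  # identifier is 'first' or 'second'
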
    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x: candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision_ids.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator.  Ideally the caching would be done in fetch.py I think.  Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed.
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely.
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids)

    def _iter_inventories(self, revision_ids):
        """single-document based inventory iteration."""
        for text, revision_id in self._iter_inventory_xmls(revision_ids):
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids):
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                text_chunks[record.key] = record.get_bytes_as('chunked')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            chunks = text_chunks.pop(key)
            yield ''.join(chunks), key[-1]

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id])
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry.
        """
        return self.get_revision(revision_id).inventory_sha1

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history.

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            yield next_id
            # Note: The following line may raise KeyError in the event of
            # truncated history. We decided not to have a try:except:raise
            # RevisionNotPresent here until we see a use for it, because of the
            # cost in an inner loop that is by its very nature O(history).
            # Robert Collins 20080326
            parents = graph.get_parent_map([next_id])[next_id]
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]

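    # Illustrative sketch (not part of the original module): collecting the
    # mainline (lefthand) history leading up to a tip, newest first:
    #
    #   mainline = list(repo.iter_reverse_revision_history(tip_rev_id))
    #
    # The iterator stops at the start of history; a revision with a missing
    # parent raises KeyError, as noted in the comment above.
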
    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        if not topo_sorted:
            return [None] + list(keys)
        parent_map = graph.get_parent_map(keys)
        keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)

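    # Illustrative sketch (not part of the original module): counting the
    # revisions merged into a tip, assuming 'repo' is read-locked:
    #
    #   ancestry = repo.get_ancestry(tip_rev_id)  # [None, rev, rev, ...]
    #   num_revisions = len(ancestry) - 1         # skip the leading None
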
"""Compress the data within the repository.
2367
This operation only makes sense for some repository types. For other
2368
types it should be a no-op that just returns.
2370
This stub method does not require a lock, but subclasses should use
2371
@needs_write_lock as this is a long running call its reasonable to
2372
implicitly lock for the user.
2375
    def get_transaction(self):
        return self.control_files.get_transaction()

    def get_parent_map(self, revision_ids):
        """See graph._StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple(parent_revid
                    for (parent_revid,) in parent_keys)
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result

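    # Illustrative sketch (not part of the original module): get_parent_map
    # answers only for revisions that are present, so absence is detected by
    # a missing key rather than an exception:
    #
    #   parent_map = repo.get_parent_map(['rev-1', 'ghost-rev'])
    #   # parent_map == {'rev-1': ('rev-0',)} when 'ghost-rev' is absent
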
    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self, text_key_references=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            calculated.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')

    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)

# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                    % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()

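# Illustrative sketch (not part of the original module): copying a handful
# of revisions between repositories via install_revisions, assuming both
# repositories are locked and 'pairs' is a list of (revision, tree) tuples
# taken from the source:
#
#   triples = [(rev, tree, None) for (rev, tree) in pairs]
#   install_revisions(target_repo, triples, num_revisions=len(triples))
#
# The signature element of each triple may be None; the whole batch is
# wrapped in a single write group, as shown above.
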
def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should commit add an inventory, or an inventory delta to the repository.
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas?
    # (E.g. knits, where the knit deltas can be transplanted intact.)
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None

    def __repr__(self):
return "<%s>" % self.__class__.__name__
2785
def __eq__(self, other):
2786
# format objects are generally stateless
2787
return isinstance(other, self.__class__)
2789
def __ne__(self, other):
2790
return not self == other
2793
def find_format(klass, a_bzrdir):
2794
"""Return the format for the repository object in a_bzrdir.
2796
This is used by bzr native formats that have a "format" file in
2797
the repository. Other methods may be used by different types of
2801
transport = a_bzrdir.get_repository_transport(None)
2802
format_string = transport.get("format").read()
2803
return format_registry.get(format_string)
2804
except errors.NoSuchFile:
2805
raise errors.NoRepositoryPresent(a_bzrdir)
2807
raise errors.UnknownFormatError(format=format_string,
2811
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  transport,
                                  name,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in it.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()


# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directories
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
)
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
)

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )

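# Illustrative sketch (not part of the original module): the registry maps
# the bytes stored in a repository's 'format' file to a lazily-loaded
# format class or factory, so opening code can do, for example:
#
#   format = format_registry.get('Bazaar-NG Knit Repository Format 1')
#   # 'format' is the RepositoryFormatKnit1 entry, loaded on first access
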
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _walk_to_common_revisions_batch_size = 50
    _optimisers = []
    """The available optimised InterRepository types."""

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.
        :return: None.
        """
        from bzrlib.fetch import RepoFetcher
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        pb=pb, find_ghosts=find_ghosts)

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        # Fast path for the case where all the revisions are already in the
        # target repo.
        # (Although this does incur an extra round trip for the
        # fairly common case where the target doesn't already have the revision
        # we're pulling.)
        if set(target_graph.get_parent_map(revision_ids)) == revision_ids:
            return graph.SearchResult(revision_ids, set(), 0, set())
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories, e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplementedError):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

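    # Example (sketch only; assumes two weave-format repositories opened
    # elsewhere):
    #
    #   inter = InterRepository.get(weave_repo_a, weave_repo_b)
    #   inter.copy_content()
    #
    # The insertion order above is deliberate: texts, then inventories,
    # then signatures, then revisions, so a revision only becomes visible
    # once everything it references is already present.
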
    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # We want all the revisions needed to satisfy revision_id in source,
        # but we don't want to stat every file here and there.
        # So: first get the subset of revisions that satisfy revision_id in
        # the source, then eliminate those that the target already has.
        # This is slow on a high-latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        # - RBC 20060209
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids, so we are assured
            # that all referenced revisions are present, as they are installed
            # in topological order; and the tip revision was validated by
            # get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to
            # validate that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids, so we are assured
            # that all referenced revisions are present, as they are installed
            # in topological order; and the tip revision was validated by
            # get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to
            # validate that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack6RichRoot()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.

        InterPackRepo does not support CHK based repositories.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        from bzrlib.repofmt.groupcompress_repo import RepositoryFormatCHK1
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
            not_packs = (isinstance(source._format, RepositoryFormatCHK1) or
                isinstance(target._format, RepositoryFormatCHK1))
        except AttributeError:
            return False
        if not_packs or not are_packs:
            return False
        return InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if (len(self.source._fallback_repositories) > 0 or
            len(self.target._fallback_repositories) > 0):
            # The pack layer is not aware of fallback repositories, so when
            # fetching from a stacked repository or into a stacked repository
            # we use the generic fetch logic which uses the VersionedFiles
            # attributes on repository.
            from bzrlib.fetch import RepoFetcher
            fetcher = RepoFetcher(self.target, self.source, revision_id,
                                  pb, find_ghosts, fetch_spec=fetch_spec)
        if fetch_spec is not None:
            if len(list(fetch_spec.heads)) != 1:
                raise AssertionError(
                    "InterPackRepo.fetch doesn't support "
                    "fetching multiple heads yet.")
            revision_id = list(fetch_spec.heads)[0]
            fetch_spec = None
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            source_revision_ids = frozenset(self.source.all_revision_ids())
            revision_ids = source_revision_ids - \
                frozenset(self.target.get_parent_map(source_revision_ids))
            revision_keys = [(revid,) for revid in revision_ids]
            index = self.target._pack_collection.revision_index.combined_index
            present_revision_ids = set(item[1][0] for item in
                index.iter_entries(revision_keys))
            revision_ids = set(revision_ids) - present_revision_ids
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            #   more-core routines such as create_pack_from_packs can filter in
            #   a just-in-time fashion. (though having a HEADS list on a
            #   repository might make this a lot easier, because we could
            #   sensibly detect 'new revisions' without doing a full index
            #   scan.)
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.search_missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts).get_keys()
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
            if len(revision_ids) == 0:
                return (0, [])
        return self._pack(self.source, self.target, revision_ids)

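    # The revision_id=None branch above reduces "what needs copying" to set
    # arithmetic. The same technique in isolation (hypothetical
    # repositories):
    #
    #   source_revs = frozenset(source_repo.all_revision_ids())
    #   present = frozenset(target_repo.get_parent_map(source_revs))
    #   to_copy = source_revs - present
    #
    # get_parent_map() only returns entries for revisions the target
    # actually has, which is what makes the set difference valid.
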
    def _pack(self, source, target, revision_ids):
        from bzrlib.repofmt.pack_repo import Packer
        packs = source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            copied_revs = pack.get_revision_count()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (copied_revs, [])
        else:
            return (0, [])

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            # Find ghosts: search for revisions pointing from one repository to
            # the other, and vice versa, anywhere in the history of revision_id.
            graph = self.target.get_graph(other_repository=self.source)
            searcher = graph._make_breadth_first_searcher([revision_id])
            found_ids = set()
            while True:
                try:
                    next_revs, ghosts = searcher.next_with_ghosts()
                except StopIteration:
                    break
                if revision_id in ghosts:
                    raise errors.NoSuchRevision(self.source, revision_id)
                found_ids.update(next_revs)
                found_ids.update(ghosts)
            found_ids = frozenset(found_ids)
            # Double query here: should be able to avoid this by changing the
            # graph api further.
            result_set = found_ids - frozenset(
                self.target.get_parent_map(found_ids))
        else:
            source_ids = self.source.all_revision_ids()
            # source_ids is the worst possible case we may need to pull.
            # now we want to filter source_ids against what we actually
            # have in target, but don't try to check for existence where we know
            # we do not have a revision as that would be pointless.
            target_ids = set(self.target.all_revision_ids())
            result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        return True

    def _get_delta_for_revision(self, tree, parent_ids, basis_id, cache):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        possible_trees = [(parent_id, cache[parent_id])
                          for parent_id in parent_ids
                          if parent_id in cache]
        if len(possible_trees) == 0:
            # There either aren't any parents, or the parents aren't in the
            # cache, so just use the last converted tree
            possible_trees.append((basis_id, cache[basis_id]))
        deltas = []
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]

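    # Driven from _fetch_batch() below: the cache holds recently converted
    # RevisionTrees, so each revision can be deltified against whichever
    # cached parent yields the fewest inventory-delta entries. Sketch with
    # hypothetical revision ids:
    #
    #   cache = lru_cache.LRUCache(100)
    #   cache['rev-1'] = self.source.revision_tree('rev-1')
    #   basis_id, delta = self._get_delta_for_revision(tree, ('rev-1',),
    #       'rev-1', cache)
    #   self.target.add_inventory_by_delta(basis_id, delta, 'rev-2',
    #       ('rev-1',))
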
    def _get_parent_keys(self, root_key, parent_map):
        """Get the parent keys for a given root id."""
        root_id, rev_id = root_key
        # Include direct parents of the revision, but only if they used
        # the same root_id and are heads.
        parent_keys = []
        for parent_id in parent_map[rev_id]:
            if parent_id == _mod_revision.NULL_REVISION:
                continue
            if parent_id not in self._revision_id_to_root_id:
                # We probably didn't read this revision, go spend the
                # extra effort to actually check
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # Ghost, fill out _revision_id_to_root_id in case we
                    # encounter this again.
                    # But set parent_root_id to None since we don't really know
                    parent_root_id = None
                else:
                    parent_root_id = tree.get_root_id()
                self._revision_id_to_root_id[parent_id] = None
            else:
                parent_root_id = self._revision_id_to_root_id[parent_id]
            if root_id == parent_root_id or parent_root_id is None:
                parent_keys.append((root_id, parent_id))
        return tuple(parent_keys)

    def _new_root_data_stream(self, root_keys_to_create, parent_map):
        for root_key in root_keys_to_create:
            parent_keys = self._get_parent_keys(root_key, parent_map)
            yield versionedfile.FulltextContentFactory(root_key,
                parent_keys, None, '')

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        for tree in self.source.revision_trees(revision_ids):
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           basis_id, cache)
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Find text entries that need to be copied
            for old_path, new_path, file_id, entry in delta:
                if new_path is not None:
                    if not new_path:
                        # This is the root
                        if not self.target.supports_rich_root():
                            # The target doesn't support rich root, so we don't
                            # copy
                            continue
                        if self._converting_to_rich_root:
                            # This can't be copied normally, we have to insert
                            # it specially
                            root_keys_to_create.add((file_id, entry.revision))
                            continue
                    text_keys.add((file_id, entry.revision))
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = self._new_root_data_stream(root_keys_to_create,
                                                     parent_map)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            for parent_tree in self.source.revision_trees(parent_ids):
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parent_ids, basis_id, cache)
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                                               signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressBar
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.target.abort_write_group()
                raise
            else:
                self.target.commit_write_group()
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))

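    # The loop above is the standard write-group pattern, one group per
    # batch of 100 revisions. Stripped to its skeleton (hypothetical
    # ``insert_one_batch``):
    #
    #   repo.start_write_group()
    #   try:
    #       insert_one_batch()
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()
    #
    # Committing per batch keeps memory bounded while amortising the
    # setup/teardown cost of each group.
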
    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        first_rev = self.source.get_revision(revision_ids[0])
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterPackRepo)
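# InterRepository.get() walks the optimisers registered above and returns
# an instance of the first class whose is_compatible() accepts the pair,
# falling back to the generic InterRepository otherwise. Sketch
# (hypothetical repositories):
#
#   inter = InterRepository.get(source_repo, target_repo)
#   # e.g. two pack repositories with the same model -> InterPackRepo
#   inter.fetch(revision_id='some-rev-id')

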
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
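# Example: both named and numeric character references are handled, e.g.
#
#   _unescape_xml('&amp;')    # -> '&'
#   _unescape_xml('&#65;')    # -> 'A' (numeric references are returned
#                             #    UTF-8 encoded)

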
class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct. dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_weaves = len(self.file_ids)
        files_in_revisions = {}
        revisions_of_files = {}
        n_versions = len(self.text_index)
        if progress_bar is not None:
            progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
            else:
                self.target_repo.start_write_group()
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

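    # The resume-token protocol in sketch form (``sink`` and ``source`` are
    # hypothetical StreamSink/StreamSource instances):
    #
    #   tokens, missing = sink.insert_stream(stream, src_format, [])
    #   if missing:
    #       extra = source.get_stream_for_missing_keys(missing)
    #       tokens, missing = sink.insert_stream(extra, src_format, tokens)
    #
    # An empty token list means the write group was committed; a non-empty
    # one means it was suspended pending the listed keys.
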
    def _locked_insert_stream(self, stream, src_format):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                # conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories()
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ):
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        self.target_repo.commit_write_group()
        return [], set()

    def _extract_and_insert_inventories(self, substream, serializer):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError(
                    'Mismatched revision ids: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # bug #261339
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        if self._rich_root_upgrade():
            import bzrlib.fetch
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = list(graph.iter_topo_order(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # NB: This currently reopens the inventory weave in source;
                # using a single stream interface instead would avoid this.
                from_weave = self.from_repository.inventories
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            stream = vf.get_record_stream(keys,
                self.to_format._fetch_order, True)
            yield substream_kind, stream

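    # Missing keys arrive prefixed with their substream kind, so the
    # grouping above is just key[0] -> key[1:]. For example (hypothetical
    # keys):
    #
    #   missing = [('texts', 'file-1', 'rev-1'), ('inventories', 'rev-2')]
    #   # grouped: {'texts': set([('file-1', 'rev-1')]),
    #   #           'inventories': set([('rev-2',)]), ...}
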
    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks
            and (from_format._serializer == self.to_format._serializer)):
            # Both sides support chks, and they use the same serializer, so it
            # is safe to transmit the chk pages and inventory pages across
            # as-is.
            return self._get_chk_inventory_stream(revision_ids)
        elif (not from_format.supports_chks):
            # Source repository doesn't support chks. So we can transmit the
            # inventories 'as-is' and either they are just accepted on the
            # target, or the Sink will properly convert it.
            return self._get_simple_inventory_stream(revision_ids)
        else:
            # XXX: Hack to make not-chk->chk fetch: copy the inventories as
            # inventories. Note that this should probably be done somehow
            # as part of bzrlib.repository.StreamSink. Except JAM couldn't
            # figure out how a non-chk repository could possibly handle
            # deserializing an inventory stream from a chk repo, as it
            # doesn't have a way to understand individual pages.
            return self._get_convertable_inventory_stream(revision_ids)

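    # Decision table for the three branches above:
    #
    #   source chks | target chks | same serializer | stream used
    #   ------------+-------------+-----------------+---------------------------
    #       yes     |     yes     |       yes       | _get_chk_inventory_stream
    #       no      |     any     |       n/a       | _get_simple_inventory_stream
    #       (any other combination)                 | _get_convertable_inventory_stream
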
    def _get_simple_inventory_stream(self, revision_ids):
        from_weave = self.from_repository.inventories
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(),
            not self.delta_on_metadata()))

    def _get_chk_inventory_stream(self, revision_ids):
        """Fetch the inventory texts, along with the associated chk maps."""
        # We want an inventory outside of the search set, so that we can filter
        # out uninteresting chk pages. For now we use
        # _find_revision_outside_set, but if we had a Search with cut_revs, we
        # could use that instead.
        start_rev_id = self.from_repository._find_revision_outside_set(
            revision_ids)
        start_rev_key = (start_rev_id,)
        inv_keys_to_fetch = [(rev_id,) for rev_id in revision_ids]
        if start_rev_id != _mod_revision.NULL_REVISION:
            inv_keys_to_fetch.append((start_rev_id,))
        # Any repo that supports chk_bytes must also support out-of-order
        # insertion. At least, that is how we expect it to work.
        # We use get_record_stream instead of iter_inventories because we want
        # to be able to insert the stream as well. We could instead fetch
        # allowing deltas, and then iter_inventories, but we don't know whether
        # source or target is more 'local' anyway.
        inv_stream = self.from_repository.inventories.get_record_stream(
            inv_keys_to_fetch, 'unordered',
            True) # We need them as full-texts so we can find their references
        uninteresting_chk_roots = set()
        interesting_chk_roots = set()
        def filter_inv_stream(inv_stream):
            for idx, record in enumerate(inv_stream):
                ### child_pb.update('fetch inv', idx, len(inv_keys_to_fetch))
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(
                    self.from_repository.chk_bytes, bytes, record.key)
                if record.key == start_rev_key:
                    uninteresting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        uninteresting_chk_roots.add(p_id_map.key())
                else:
                    yield record
                    interesting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        interesting_chk_roots.add(p_id_map.key())
        ### pb.update('fetch inventory', 0, 2)
        yield ('inventories', filter_inv_stream(inv_stream))
        # Now that we have worked out all of the interesting root nodes, grab
        # all of the interesting pages and insert them
        ### pb.update('fetch inventory', 1, 2)
        interesting = chk_map.iter_interesting_nodes(
            self.from_repository.chk_bytes, interesting_chk_roots,
            uninteresting_chk_roots)
        def to_stream_adapter():
            """Adapt the iter_interesting_nodes result to a single stream.

            iter_interesting_nodes returns records as it processes them, along
            with keys. However, we only want to return the records themselves.
            """
            for record, items in interesting:
                if record is not None:
                    yield record
        # XXX: We could instead call get_record_stream(records.keys())
        # ATM, this will always insert the records as fulltexts, and
        # requires that you can hang on to records once you have gone
        # on to the next one. Further, it causes the target to
        # recompress the data. Testing shows it to be faster than
        # requesting the records again, though.
        yield ('chk_bytes', to_stream_adapter())
        ### pb.update('fetch inventory', 2, 2)

    def _get_convertable_inventory_stream(self, revision_ids):
        # XXX: One of source or target is using chks, and they don't have
        # compatible serializations. The StreamSink code expects to be
        # able to convert on the target, so we need to put
        # bytes-on-the-wire that can be converted.
        yield ('inventories', self._stream_invs_as_fulltexts(revision_ids))

    def _stream_invs_as_fulltexts(self, revision_ids):
        from_repo = self.from_repository
        from_serializer = from_repo._format._serializer
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        for inv in self.from_repository.iter_inventories(revision_ids):
            # XXX: This is a bit hackish, but it works. Basically,
            # CHKSerializer 'accidentally' supports
            # read/write_inventory_to_string, even though that is never
            # the format that is stored on disk. It *does* give us a
            # single string representation for an inventory, so live with
            # it for now.
            # This would be far better if we had a 'serialized inventory
            # delta' form. Then we could use 'inventory._make_delta', and
            # transmit that. This would both be faster to generate, and
            # result in fewer bytes-on-the-wire.
            as_bytes = from_serializer.write_inventory_to_string(inv)
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, as_bytes)