# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import time

from bzrlib import (
    bzrdir,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    osutils,
    revision as _mod_revision,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib import registry
from bzrlib.symbol_versioning import (
    deprecated_method,
    )
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
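
    # A minimal usage sketch (illustrative, not part of the original source):
    # the full CommitBuilder round trip, assuming `repo` is a write-locked
    # Repository and `config` can supply a username. Callers normally obtain
    # a builder via Repository.get_commit_builder(), which also starts the
    # write group that commit() closes.
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   builder.will_record_deletes()
    #   list(builder.record_iter_changes(tree, basis_rev_id, iter_changes))
    #   builder.finish_inventory()
    #   new_rev_id = builder.commit('example commit message')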

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            self.inv_sha1 = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)[0]
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents,
                )
        return self._new_revision_id

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id.
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta
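
    # Illustrative sketch (not part of the original source): basis delta
    # items are (old_path, new_path, file_id, new_entry) 4-tuples, so a
    # builder that saw one add, one rename and one delete would accumulate:
    #
    #   (None, 'foo.txt', 'foo-id', <InventoryFile foo-id>)    # add
    #   ('a.txt', 'b.txt', 'a-id', <InventoryFile a-id>)       # rename
    #   ('gone.txt', None, 'gone-id', None)                    # record_delete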

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r)." % (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that the
                    # content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                lines = file_obj.readlines()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
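
    # Illustrative sketch (not part of the original source): interpreting the
    # record_entry_contents() result, assuming `ie`, `parent_invs`, `path`,
    # `tree` and `content_summary` describe one candidate entry.
    #
    #   delta, version_recorded, fs_hash = builder.record_entry_contents(
    #       ie, parent_invs, path, tree, content_summary)
    #   if version_recorded:
    #       pass  # a new version of the entry was stored
    #   if fs_hash is not None:
    #       sha1, stat_value = fs_hash  # usable as a stat-cache fingerprint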

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # Working data:
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        else:
            merged_ids = {}
        # Set up the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        lines = file_obj.readlines()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, lines, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    # Wants a path hint?
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], [], heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], [], heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], [], heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
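
    # Illustrative sketch (not part of the original source): the iter_changes
    # tuples consumed above have the shape
    #   (file_id, (old_path, new_path), changed_content, (old_ver, new_ver),
    #    (old_parent, new_parent), (old_name, new_name),
    #    (old_kind, new_kind), (old_exec, new_exec))
    # so a simple rename of foo.txt to bar.txt under the root looks like:
    #   ('foo-id', ('foo.txt', 'bar.txt'), False, (True, True),
    #    ('root-id', 'root-id'), ('foo.txt', 'bar.txt'),
    #    ('file', 'file'), (False, False))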

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        # Note: as we read the content directly from the tree, we know it's not
        # been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this is
        # not a guarantee of safety. What would be better is always checking
        # the content during test suite execution. RBC 20070912
        parent_keys = tuple((file_id, parent) for parent in parents)
        return self.repository.texts.add_lines(
            (file_id, self._new_revision_id), parent_keys, new_lines,
            nostore_sha=nostore_sha, random_id=self.random_revid,
            check_content=False)[0:2]
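
    # Illustrative sketch (not part of the original source): text store keys
    # are (file_id, revision_id) 2-tuples, so a file changed in two revisions
    # produces two keys, with the parent link expressed via parent_keys:
    #
    #   ('foo-id', 'rev-1')                               # first text
    #   ('foo-id', 'rev-2')  parents=(('foo-id', 'rev-1'),)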


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


######################################################################
# Repositories

class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories, texts and chk_bytes attributes) and a
    Transport, which respectively provide byte storage and a means to access
    the (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
    byte string made up of a hash identifier and a hash value.

    We use this interface because it allows low friction with the underlying
    code that implements disk indices, network encoding and other parts of
    bzrlib.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures.  The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
        any data the repository chooses to store or have indexed by its hash.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """
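
    # Illustrative sketch (not part of the original source): the key shapes
    # described above, for a revision 'rev-1' that touches file 'foo-id':
    #
    #   ('rev-1',)           # key into self.revisions / self.inventories
    #   ('rev-1',)           # key into self.signatures
    #   ('foo-id', 'rev-1')  # key into self.texts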

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )

    def abort_write_group(self, suppress_errors=False):
        """Abort the contents accrued within the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate.  Defaults to False.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError(
                'mismatched lock context and write group. %r, %r' %
                (self._write_group, self.get_transaction()))
        try:
            self._abort_write_group()
        except Exception, exc:
            self._write_group = None
            if not suppress_errors:
                raise
            mutter('abort_write_group failed')
            log_exception_quietly()
            note('bzr: ERROR (ignored): %s', exc)
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
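
    # Illustrative sketch (not part of the original source): adding an
    # inventory as a delta, assuming a write group is open, 'rev-1' is
    # already present and `new_entry` is an InventoryEntry for the added file.
    #
    #   delta = [(None, 'foo.txt', 'foo-id', new_entry)]
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       'rev-1', delta, 'rev-2', ['rev-1'])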

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        return self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
                    up in the inventory store.
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present.  There is no direct replacement
        method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        present.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        #
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        locked = self.is_locked()
        result = self.control_files.lock_write(token=token)
        for repo in self._fallback_repositories:
            # Writes don't affect fallback repos
            repo.lock_read()
        if not locked:
            self._refresh_data()
        return result

    def lock_read(self):
        locked = self.is_locked()
        self.control_files.lock_read()
        for repo in self._fallback_repositories:
            repo.lock_read()
        if not locked:
            self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)
        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
            # result['size'] = t
        return result
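
    # Illustrative sketch (not part of the original source): a typical
    # gather_stats() result for a small repository (values invented):
    #
    #   {'committers': 2,
    #    'firstrev': (1199145600.0, 0),
    #    'latestrev': (1230768000.0, 3600),
    #    'revisions': 42}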

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def get_missing_parent_inventories(self):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        if len(parents.difference(present_inventories)) == 0:
            # No missing parent inventories.
            return set()
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.add_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target.  Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy.  This
            allows copying multiple heads at once.  Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        if self.has_same_location(source) and fetch_spec is None:
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
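
    # Illustrative sketch (not part of the original source): two common fetch
    # calls, both assuming no write group is open on `target`.
    #
    #   target.fetch(source)                       # mirror all content
    #   target.fetch(source, revision_id='rev-2')  # just rev-2 and ancestry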

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        if self.control_files._lock_count == 0:
            self._inventory_entry_cache.clear()
            for repo in self._fallback_repositories:
                repo.unlock()
1585
def clone(self, a_bzrdir, revision_id=None):
1586
"""Clone this repository into a_bzrdir using the current format.
1588
Currently no check is made that the format of this repository and
1589
the bzrdir format are compatible. FIXME RBC 20060201.
1591
:return: The newly created destination repository.
1593
# TODO: deprecate after 0.16; cloning this with all its settings is
1594
# probably not very useful -- mbp 20070423
1595
dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
1596
self.copy_content_into(dest_repo, revision_id)
1599
def start_write_group(self):
1600
"""Start a write group in the repository.
1602
Write groups are used by repositories which do not have a 1:1 mapping
1603
between file ids and backend store to manage the insertion of data from
1604
both fetch and commit operations.
1606
A write lock is required around the start_write_group/commit_write_group
1607
for the support of lock-requiring repository formats.
1609
One can only insert data into a repository inside a write group.
1613
if not self.is_write_locked():
1614
raise errors.NotWriteLocked(self)
1615
if self._write_group:
1616
raise errors.BzrError('already in a write group')
1617
self._start_write_group()
1618
# so we can detect unlock/relock - the write group is now entered.
1619
self._write_group = self.get_transaction()
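
    # Hedged sketch of the expected calling discipline (hypothetical repo
    # object and data): a write group brackets all insertion inside a write
    # lock, and must be committed or aborted before the lock is released.
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           repo.add_signature_text(rev_id, signature)  # any insertion
    #       except:
    #           repo.abort_write_group()
    #           raise
    #       else:
    #           repo.commit_write_group()
    #   finally:
    #       repo.unlock()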
1621

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    @needs_read_lock
    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendant repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo
1651

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result
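
    # Illustrative sketch (hypothetical revision ids): has_revisions returns
    # the subset that is present, so the missing ids are a set difference away.
    #
    #   wanted = set(['rev-a', 'rev-b'])
    #   present = repo.has_revisions(wanted)
    #   missing = wanted - present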
1680

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        revs = {}
        for record in stream:
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self, record.key[0])
            text = record.get_bytes_as('fulltext')
            rev = self._serializer.read_revision_from_string(text)
            revs[record.key[0]] = rev
        return [revs[revid] for revid in revision_ids]
1718

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()
1734

    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)
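
    # Illustrative sketch (hypothetical revision id): each yielded delta is a
    # TreeDelta computed against the revision's lefthand parent.
    #
    #   rev = repo.get_revision('some-revision-id')
    #   for delta in repo.get_deltas_for_revisions([rev]):
    #       for path, file_id, kind in delta.added:
    #           print path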
1774

    @needs_read_lock
    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r],
            specific_fileids=specific_fileids))[0]

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))
1798

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()
1815

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            # RBC 20071114.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result
1895

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_ids):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_ids: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision id from each parsed line will be looked up in the
            revision_ids filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
            line_iterator).iterkeys())
        # Note that revision_ids are revision keys.
        parent_maps = self.revisions.get_parent_map(revision_ids)
        parents = set()
        map(parents.update, parent_maps.itervalues())
        parents.difference_update(revision_ids)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parents)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result
1946

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revision_ids: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()
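
    # Illustrative sketch (hypothetical ids): the result maps each altered
    # file id to the exact set of revisions that altered it, e.g.
    #
    #   altered = repo.fileids_altered_by_revision_ids(['rev-a', 'rev-b'])
    #   # altered might look like:
    #   # {'file-id-1': set(['rev-a']), 'file-id-2': set(['rev-a', 'rev-b'])}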
1967

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
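
    # Illustrative sketch (hypothetical keys): the identifier element is
    # opaque to the repository and is simply yielded back with the bytes.
    #
    #   desired = [('file-id-1', 'rev-a', 'my-tag-1')]
    #   for identifier, byte_parts in repo.iter_files_bytes(desired):
    #       text = ''.join(byte_parts)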
1994

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()
2017

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index
2102

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision_ids.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)
2156

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids)
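
    # Illustrative sketch (hypothetical ids): inventories are parsed one at a
    # time, so even large requests keep the parse cost bounded.
    #
    #   for inv in repo.iter_inventories(['rev-a', 'rev-b']):
    #       print inv.revision_id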
2175

    def _iter_inventories(self, revision_ids):
        """single-document based inventory iteration."""
        for text, revision_id in self._iter_inventory_xmls(revision_ids):
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids):
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                text_chunks[record.key] = record.get_bytes_as('chunked')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            chunks = text_chunks.pop(key)
            yield ''.join(chunks), key[-1]

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num
2216

    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id])
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry.
        """
        return self.get_revision(revision_id).inventory_sha1

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history.

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            yield next_id
            # Note: The following line may raise KeyError in the event of
            # truncated history. We decided not to have a try:except:raise
            # RevisionNotPresent here until we see a use for it, because of the
            # cost in an inner loop that is by its very nature O(history).
            # Robert Collins 20080326
            parents = graph.get_parent_map([next_id])[next_id]
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]
2255

    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """
2297

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)
2339

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        if topo_sorted:
            parent_map = graph.get_parent_map(keys)
            keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)

    def pack(self):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long running call it's reasonable to
        implicitly lock for the user.
        """

    def get_transaction(self):
        return self.control_files.get_transaction()
2382

    def get_parent_map(self, revision_ids):
        """See graph._StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple(parent_revid
                    for (parent_revid,) in parent_keys)
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result
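
    # Illustrative sketch (hypothetical ids): ghosts are simply absent from
    # the result, while NULL_REVISION maps to an empty parent tuple.
    #
    #   parent_map = repo.get_parent_map(['rev-a', 'null:'])
    #   # e.g. {'rev-a': ('rev-a-parent',), 'null:': ()}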
2404

    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self, text_key_references=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            calculated.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result
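
    # Illustrative sketch (hypothetical ids): a SearchResult describes a set
    # of revisions by its heads (start keys) and excluded boundary parents.
    #
    #   result = repo.revision_ids_to_search_result(set(['rev-a', 'rev-b']))
    #   keys = result.get_keys()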
2442

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')
2482

    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)
2536


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                    % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')
2570


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
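
# Illustrative sketch (hypothetical objects): install_revisions handles the
# write group itself, so callers only need to hold a write lock.
#
#   triples = [(rev, tree, None)]   # signature may be None
#   repository.lock_write()
#   try:
#       install_revisions(repository, triples, num_revisions=1)
#   finally:
#       repository.unlock()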
2596


def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)
2660


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
2724


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should commit add an inventory, or an inventory delta to the repository.
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas ?
    # (E.g. knits, where the knit deltas can be transplanted intact.
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None

    def __repr__(self):
        return "<%s>" % self.__class__.__name__
2789

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)
2862

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)
2904


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()
2953


# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directories
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
)
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
)

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )
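
# Illustrative sketch (hypothetical format, module and class names): third
# party formats register the same way, naming the module and class so that
# loading stays lazy until the format string is actually seen on disk.
#
#   format_registry.register_lazy(
#       'Example repository format 1 (hypothetical)\n',
#       'example.repofmt',
#       'ExampleRepositoryFormat1',
#       )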
3060


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _walk_to_common_revisions_batch_size = 50
    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.
        :return: None.
        """
        from bzrlib.fetch import RepoFetcher
        f = RepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               fetch_spec=fetch_spec,
                               pb=pb, find_ghosts=find_ghosts)
3112

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()
3165

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
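
    # Illustrative sketch (hypothetical repositories): callers usually reach
    # this via InterRepository.get, which picks the best optimiser available.
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   search_result = inter.search_missing_revision_ids(find_ghosts=True)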
3190
def _same_model(source, target):
3191
"""True if source and target have the same data representation.
3193
Note: this is always called on the base class; overriding it in a
3194
subclass will have no effect.
3197
InterRepository._assert_same_model(source, target)
3199
except errors.IncompatibleRepositories, e:
3203
def _assert_same_model(source, target):
3204
"""Raise an exception if two repositories do not use the same model.
3206
if source.supports_rich_root() != target.supports_rich_root():
3207
raise errors.IncompatibleRepositories(source, target,
3208
"different rich-root support")
3209
if source._serializer != target._serializer:
3210
raise errors.IncompatibleRepositories(source, target,
3211
"different serializers")


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplementedError):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # We want all revisions needed to satisfy revision_id in source,
        # but we don't want to stat every file here and there.
        # So we want, then, all revisions the other repository needs in
        # order to satisfy revision_id checked, but not those that we
        # have locally.
        # The first thing is to get a subset of the revisions that
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # This is slow on high-latency connections to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # Now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision, as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # Since we used get_ancestry to determine source_ids, we are
            # assured all referenced revisions are present, as they are
            # installed in topological order, and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # If we just grabbed the possibly-available ids, then we only
            # have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
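
    # The filtering above is plain set algebra. A toy sketch with literal
    # ids (purely illustrative values):
    #
    #   source_ids_set = set(['a', 'b', 'c'])
    #   target_ids = set(['b', 'd'])
    #   possibly_present = target_ids.intersection(source_ids_set)  # {'b'}
    #   # _eliminate_revisions_not_present() then confirms which of those
    #   # are real revisions, and the difference is what must be pulled:
    #   required = source_ids_set.difference(set(['b']))  # {'a', 'c'}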


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # Now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision, as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # Since we used get_ancestry to determine source_ids, we are
            # assured all referenced revisions are present, as they are
            # installed in topological order, and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # If we just grabbed the possibly-available ids, then we only
            # have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack6RichRoot()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.

        InterPackRepo does not support CHK based repositories.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        from bzrlib.repofmt.groupcompress_repo import RepositoryFormatCHK1
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
            not_packs = (isinstance(source._format, RepositoryFormatCHK1) or
                isinstance(target._format, RepositoryFormatCHK1))
        except AttributeError:
            return False
        if not_packs or not are_packs:
            return False
        return InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if (len(self.source._fallback_repositories) > 0 or
            len(self.target._fallback_repositories) > 0):
            # The pack layer is not aware of fallback repositories, so when
            # fetching from a stacked repository or into a stacked repository
            # we use the generic fetch logic which uses the VersionedFiles
            # attributes on repository.
            from bzrlib.fetch import RepoFetcher
            fetcher = RepoFetcher(self.target, self.source, revision_id,
                                  pb, find_ghosts, fetch_spec=fetch_spec)
        if fetch_spec is not None:
            if len(list(fetch_spec.heads)) != 1:
                raise AssertionError(
                    "InterPackRepo.fetch doesn't support "
                    "fetching multiple heads yet.")
            revision_id = list(fetch_spec.heads)[0]
            fetch_spec = None
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            source_revision_ids = frozenset(self.source.all_revision_ids())
            revision_ids = source_revision_ids - \
                frozenset(self.target.get_parent_map(source_revision_ids))
            revision_keys = [(revid,) for revid in revision_ids]
            index = self.target._pack_collection.revision_index.combined_index
            present_revision_ids = set(item[1][0] for item in
                index.iter_entries(revision_keys))
            revision_ids = set(revision_ids) - present_revision_ids
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            #   more-core routines such as create_pack_from_packs can filter in
            #   a just-in-time fashion. (Though having a HEADS list on a
            #   repository might make this a lot easier, because we could
            #   sensibly detect 'new revisions' without doing a full index scan.)
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            revision_ids = self.search_missing_revision_ids(revision_id,
                find_ghosts=find_ghosts).get_keys()
            if len(revision_ids) == 0:
                return (0, [])
        return self._pack(self.source, self.target, revision_ids)

    def _pack(self, source, target, revision_ids):
        from bzrlib.repofmt.pack_repo import Packer
        packs = source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            copied_revs = pack.get_revision_count()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (copied_revs, [])
        else:
            return (0, [])
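
    # Sketch of the pack-to-pack flow implemented by fetch() and _pack()
    # above (hypothetical repository objects; locking elided):
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   copied, failures = inter.fetch(revision_id='some-rev-id')
    #   # fetch() selects revision_ids, _pack() copies them in a single
    #   # Packer pass, then the target autopacks.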

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            # Find ghosts: search for revisions pointing from one repository to
            # the other, and vice versa, anywhere in the history of revision_id.
            graph = self.target.get_graph(other_repository=self.source)
            searcher = graph._make_breadth_first_searcher([revision_id])
            found_ids = set()
            while True:
                try:
                    next_revs, ghosts = searcher.next_with_ghosts()
                except StopIteration:
                    break
                if revision_id in ghosts:
                    raise errors.NoSuchRevision(self.source, revision_id)
                found_ids.update(next_revs)
                found_ids.update(ghosts)
            found_ids = frozenset(found_ids)
            # Double query here: should be able to avoid this by changing the
            # graph api further.
            result_set = found_ids - frozenset(
                self.target.get_parent_map(found_ids))
        else:
            source_ids = self.source.all_revision_ids()
            # source_ids is the worst possible case we may need to pull.
            # Now we want to filter source_ids against what we actually
            # have in target, but don't try to check for existence where we know
            # we do not have a revision, as that would be pointless.
            target_ids = set(self.target.all_revision_ids())
            result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        return True

    def _get_delta_for_revision(self, tree, parent_ids, basis_id, cache):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        possible_trees = [(parent_id, cache[parent_id])
                          for parent_id in parent_ids
                          if parent_id in cache]
        if len(possible_trees) == 0:
            # There either aren't any parents, or the parents aren't in the
            # cache, so just use the last converted tree
            possible_trees.append((basis_id, cache[basis_id]))
        deltas = []
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
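
    # The selection above is "smallest delta wins": candidates sort by the
    # first tuple element. A toy illustration (made-up lengths and ids):
    #
    #   deltas = [(12, 'parent-1', delta_1), (3, 'parent-2', delta_2)]
    #   deltas.sort()          # orders by delta length first
    #   deltas[0][1:]          # -> ('parent-2', delta_2)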

    def _get_parent_keys(self, root_key, parent_map):
        """Get the parent keys for a given root id."""
        root_id, rev_id = root_key
        # Include direct parents of the revision, but only if they used
        # the same root_id and are heads.
        parent_keys = []
        for parent_id in parent_map[rev_id]:
            if parent_id == _mod_revision.NULL_REVISION:
                continue
            if parent_id not in self._revision_id_to_root_id:
                # We probably didn't read this revision, go spend the
                # extra effort to actually check
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # Ghost, fill out _revision_id_to_root_id in case we
                    # encounter this again.
                    # But set parent_root_id to None since we don't really know
                    parent_root_id = None
                else:
                    parent_root_id = tree.get_root_id()
                self._revision_id_to_root_id[parent_id] = None
            else:
                parent_root_id = self._revision_id_to_root_id[parent_id]
            if root_id == parent_root_id:
                # With stacking we _might_ want to refer to a non-local
                # revision, but this code path only applies when we have the
                # full content available, so ghosts really are ghosts, not just
                # the edge of local data.
                parent_keys.append((parent_id,))
            else:
                # root_id may be in the parent anyway.
                try:
                    tree = self.source.revision_tree(parent_id)
                except errors.NoSuchRevision:
                    # ghost, can't refer to it.
                    pass
                else:
                    try:
                        parent_keys.append((tree.inventory[root_id].revision,))
                    except errors.NoSuchId:
                        # not in the tree
                        pass
        g = graph.Graph(self.source.revisions)
        heads = g.heads(parent_keys)
        selected_keys = []
        for key in parent_keys:
            if key in heads and key not in selected_keys:
                selected_keys.append(key)
        return tuple([(root_id,) + key for key in selected_keys])

    def _new_root_data_stream(self, root_keys_to_create, parent_map):
        for root_key in root_keys_to_create:
            parent_keys = self._get_parent_keys(root_key, parent_map)
            yield versionedfile.FulltextContentFactory(root_key,
                parent_keys, None, '')

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        for tree in self.source.revision_trees(revision_ids):
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           basis_id, cache)
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Find text entries that need to be copied
            for old_path, new_path, file_id, entry in delta:
                if new_path is not None:
                    if not new_path:
                        # This is the root
                        if not self.target.supports_rich_root():
                            # The target doesn't support rich root, so we don't
                            # copy
                            continue
                        if self._converting_to_rich_root:
                            # This can't be copied normally, we have to insert
                            # it specially
                            root_keys_to_create.add((file_id, entry.revision))
                            continue
                    text_keys.add((file_id, entry.revision))
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = self._new_root_data_stream(root_keys_to_create,
                                                     parent_map)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert. We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            for parent_tree in self.source.revision_trees(parent_ids):
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parent_ids, basis_id, cache)
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressBar
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.target.abort_write_group()
                raise
            else:
                self.target.commit_write_group()
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))
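
    # The loop above is the standard write-group pattern: any failure in a
    # batch aborts that write group, so no partial batch is left behind.
    # Schematically (hypothetical `repo`):
    #
    #   repo.start_write_group()
    #   try:
    #       pass  # insert one batch
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()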

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        first_rev = self.source.get_revision(revision_ids[0])
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterPackRepo)
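
# The registrations above feed InterRepository.get(), which walks the
# optimiser list and falls back to the generic InterRepository when no
# is_compatible() test matches. Sketch (hypothetical repositories):
#
#   inter = InterRepository.get(repo_a, repo_b)
#   # e.g. isinstance(inter, InterKnitRepo) when both formats are knits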


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of repo, giving feedback via pb.

        :param repo: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)
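
    # Usage sketch (hypothetical format, repo and progress bar objects):
    #
    #   converter = CopyConverter(target_format)
    #   converter.convert(repo, pb)
    #   # the old content is kept in repository.backup until the copy into
    #   # the new repository succeeds, then the backup is deleted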


_unescape_map = {
    'apos': "'",
    'quot': '"',
    'amp': '&',
    'lt': '<',
    'gt': '>',
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
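
# A small illustration of the pair above (results are exact for these
# inputs):
#
#   _unescape_xml('&amp;mpersand') == '&mpersand'
#   _unescape_xml('&#65;&lt;') == 'A<'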


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct. dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_weaves = len(self.file_ids)
        files_in_revisions = {}
        revisions_of_files = {}
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys for
            additional items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
            else:
                self.target_repo.start_write_group()
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
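
    # Protocol sketch for the resume-token handshake implemented above
    # (hypothetical sink/source pair; see StreamSource later in this file):
    #
    #   tokens, missing = sink.insert_stream(stream, src_format, [])
    #   if missing:
    #       # fetch the missing items and resume with the returned tokens
    #       tokens, missing = sink.insert_stream(
    #           source.get_stream_for_missing_keys(missing),
    #           src_format, tokens)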

    def _locked_insert_stream(self, stream, src_format):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError(
                    'unexpected substream type %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories()
        try:
            for prefix, versioned_file in (
                    ('texts', self.target_repo.texts),
                    ('inventories', self.target_repo.inventories),
                    ('revisions', self.target_repo.revisions),
                    ('signatures', self.target_repo.signatures),
                    ('chk_bytes', self.target_repo.chk_bytes),
                    ):
                if versioned_file is None:
                    continue
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        self.target_repo.commit_write_group()
        return [], set()

    def _extract_and_insert_inventories(self, substream, serializer):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError(
                    'mismatched revision id: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix.
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        if self._rich_root_upgrade():
            import bzrlib.fetch
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = list(graph.iter_topo_order(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # NB: This currently reopens the inventory weave in source;
                # using a single stream interface instead would avoid this.
                from_weave = self.from_repository.inventories
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            stream = vf.get_record_stream(keys,
                self.to_format._fetch_order, True)
            yield substream_kind, stream
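
    # The missing_keys fed in here are the prefixed tuples produced by
    # StreamSink, e.g. ('texts', file_id, revision_id) or
    # ('inventories', revision_id); the loop above buckets them per
    # versioned file before streaming full texts back. Illustrative input:
    #
    #   missing = set([('texts', 'file-id-1', 'rev-1'),
    #                  ('inventories', 'rev-2')])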

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks
            and (from_format._serializer == self.to_format._serializer)):
            # Both sides support chks, and they use the same serializer, so it
            # is safe to transmit the chk pages and inventory pages across
            # as-is.
            return self._get_chk_inventory_stream(revision_ids)
        elif (not from_format.supports_chks):
            # Source repository doesn't support chks. So we can transmit the
            # inventories 'as-is' and either they are just accepted on the
            # target, or the Sink will properly convert it.
            return self._get_simple_inventory_stream(revision_ids)
        else:
            # XXX: Hack to make not-chk->chk fetch: copy the inventories as
            #      inventories. Note that this should probably be done somehow
            #      as part of bzrlib.repository.StreamSink. Except JAM couldn't
            #      figure out how a non-chk repository could possibly handle
            #      deserializing an inventory stream from a chk repo, as it
            #      doesn't have a way to understand individual pages.
            return self._get_convertable_inventory_stream(revision_ids)

    def _get_simple_inventory_stream(self, revision_ids):
        from_weave = self.from_repository.inventories
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(),
            not self.delta_on_metadata()))

    def _get_chk_inventory_stream(self, revision_ids):
        """Fetch the inventory texts, along with the associated chk maps."""
        # We want an inventory outside of the search set, so that we can filter
        # out uninteresting chk pages. For now we use
        # _find_revision_outside_set, but if we had a Search with cut_revs, we
        # could use that instead.
        start_rev_id = self.from_repository._find_revision_outside_set(
            revision_ids)
        start_rev_key = (start_rev_id,)
        inv_keys_to_fetch = [(rev_id,) for rev_id in revision_ids]
        if start_rev_id != _mod_revision.NULL_REVISION:
            inv_keys_to_fetch.append((start_rev_id,))
        # Any repo that supports chk_bytes must also support out-of-order
        # insertion. At least, that is how we expect it to work.
        # We use get_record_stream instead of iter_inventories because we want
        # to be able to insert the stream as well. We could instead fetch
        # allowing deltas, and then iter_inventories, but we don't know whether
        # source or target is more 'local' anyway.
        inv_stream = self.from_repository.inventories.get_record_stream(
            inv_keys_to_fetch, 'unordered',
            True) # We need them as full-texts so we can find their references
        uninteresting_chk_roots = set()
        interesting_chk_roots = set()
        def filter_inv_stream(inv_stream):
            for idx, record in enumerate(inv_stream):
                ### child_pb.update('fetch inv', idx, len(inv_keys_to_fetch))
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(
                    self.from_repository.chk_bytes, bytes, record.key)
                if record.key == start_rev_key:
                    uninteresting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        uninteresting_chk_roots.add(p_id_map.key())
                else:
                    yield record
                    interesting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        interesting_chk_roots.add(p_id_map.key())
        ### pb.update('fetch inventory', 0, 2)
        yield ('inventories', filter_inv_stream(inv_stream))
        # Now that we have worked out all of the interesting root nodes, grab
        # all of the interesting pages and insert them
        ### pb.update('fetch inventory', 1, 2)
        interesting = chk_map.iter_interesting_nodes(
            self.from_repository.chk_bytes, interesting_chk_roots,
            uninteresting_chk_roots)
        def to_stream_adapter():
            """Adapt the iter_interesting_nodes result to a single stream.

            iter_interesting_nodes returns records as it processes them, along
            with keys. However, we only want to return the records themselves.
            """
            for record, items in interesting:
                if record is not None:
                    yield record
        # XXX: We could instead call get_record_stream(records.keys())
        #      ATM, this will always insert the records as fulltexts, and
        #      requires that you can hang on to records once you have gone
        #      on to the next one. Further, it causes the target to
        #      recompress the data. Testing shows it to be faster than
        #      requesting the records again, though.
        yield ('chk_bytes', to_stream_adapter())
        ### pb.update('fetch inventory', 2, 2)

    def _get_convertable_inventory_stream(self, revision_ids):
        # XXX: One of source or target is using chks, and they don't have
        #      compatible serializations. The StreamSink code expects to be
        #      able to convert on the target, so we need to put
        #      bytes-on-the-wire that can be converted.
        yield ('inventories', self._stream_invs_as_fulltexts(revision_ids))

    def _stream_invs_as_fulltexts(self, revision_ids):
        from_repo = self.from_repository
        from_serializer = from_repo._format._serializer
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        for inv in self.from_repository.iter_inventories(revision_ids):
            # XXX: This is a bit hackish, but it works. Basically,
            #      CHKSerializer 'accidentally' supports
            #      read/write_inventory_to_string, even though that is never
            #      the format that is stored on disk. It *does* give us a
            #      single string representation for an inventory, so live with
            #      it for now.
            #      This would be far better if we had a 'serialized inventory
            #      delta' form. Then we could use 'inventory._make_delta', and
            #      transmit that. This would both be faster to generate, and
            #      result in fewer bytes-on-the-wire.
            as_bytes = from_serializer.write_inventory_to_string(inv)
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, as_bytes)