# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import time

from bzrlib import (
    bzrdir,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    osutils,
    revision as _mod_revision,
    )
""")

from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib import registry
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)

# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
            timestamp=self._timestamp,
            timezone=self._timezone,
            committer=self._committer,
            message=message,
            inventory_sha1=self.inv_sha1,
            revision_id=self._new_revision_id,
            properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
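
    # Illustrative usage sketch (editorial addition, not part of the original
    # module): the typical lifecycle of a CommitBuilder, assuming `repo` is a
    # write-locked Repository and `branch`, `config`, `tree`, `parent_invs`
    # and the `entries` iterable are hypothetical names supplied by the
    # caller. get_commit_builder() opens the write group that commit() closes:
    #
    #     builder = repo.get_commit_builder(branch, parents, config)
    #     try:
    #         for path, ie in entries:
    #             builder.record_entry_contents(ie, parent_invs, path, tree,
    #                 tree.path_content_summary(path))
    #         builder.finish_inventory()
    #         rev_id = builder.commit('example commit message')
    #     except:
    #         builder.abort()
    #         raise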

    def abort(self):
        """Abort the commit that is being built."""
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            self.inv_sha1 = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)[0]
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents)
        return self._new_revision_id

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)
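
    # For illustration (editorial note): given candidate revisions A and B
    # for a file where B descends from A, _heads() returns just B - the
    # dominated revision A is not a head, so an entry carried over from B
    # alone needs no new per-file graph node.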

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always the
        # new revision id.
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '', None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
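
    # Illustrative sketch (editorial addition; the path and file id are
    # hypothetical): the delete-recording protocol these methods form:
    #
    #     builder.will_record_deletes()
    #     builder.record_delete('doomed.txt', 'doomed-file-id')
    #     delta = builder.get_basis_delta()
    #     # delta now includes ('doomed.txt', None, 'doomed-file-id', None)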

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r).", (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                # Either there is only a hash change (no hash cache entry,
                # or same size content change), or there is no change on
                # this file.
                # Provide the parent's hash to the store layer, so that if the
                # content is unchanged we will not store a new node.
                nostore_sha = parent_entry.text_sha1
            else:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint
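
    # For illustration (editorial note, values hypothetical): for a file in a
    # single-parent commit, content_summary might be
    #     ('file', 2310, False, None)
    # and the method returns (change_delta, version_recorded, fs_hash), e.g.
    #     (None, False, None) for the carried-over case, or
    #     ((None, 'hello.c', 'hello-id', ie), True, (sha1, stat_value))
    # when a new text version was recorded.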

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory delta's
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]}
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta change: (file_id, (path_in_source, path_in_target),
            #  changed_content, versioned, parent, name, kind,
            #  executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
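
    # Editorial note: text keys are (file_id, revision_id) 2-tuples, so for
    # parents ['rev-1', 'rev-2'] and file id 'f-id' (hypothetical values) the
    # helper above builds parent_keys == (('f-id', 'rev-1'), ('f-id', 'rev-2'))
    # and stores the new text under ('f-id', self._new_revision_id).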


class RootCommitBuilder(CommitBuilder):
    """This CommitBuilder actually records the root id."""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


######################################################################
# Repositories


class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories, texts and chk_bytes attributes) and a
    Transport, which respectively provide byte storage and a means to access
    the (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
    byte string made up of a hash identifier and a hash value.

    We use this interface because it allows low friction with the underlying
    code that implements disk indices, network encoding and other parts of
    bzrlib.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures. The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
        any data the repository chooses to store or have indexed by its hash.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )

    def abort_write_group(self, suppress_errors=False):
        """Abort the contents accrued within the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate. Defaults to False.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            if suppress_errors:
                mutter(
                '(suppressed) mismatched lock context and write group. %r, %r',
                self._write_group, self.get_transaction())
                return
            raise errors.BzrError(
                'mismatched lock context and write group. %r, %r' %
                (self._write_group, self.get_transaction()))
        try:
            self._abort_write_group()
        except Exception, exc:
            self._write_group = None
            if not suppress_errors:
                raise
            mutter('abort_write_group failed')
            log_exception_quietly()
            note('bzr: ERROR (ignored): %s', exc)
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network
        operations may not be possible or may error and if possible should not
        be attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        if self.is_locked():
            # This repository will call fallback.unlock() when we transition to
            # the unlocked state, so we make sure to increment the lock count
            repository.lock_read()
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details.)
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
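
    # Illustrative sketch (editorial addition; ids are hypothetical): a delta
    # is an iterable of (old_path, new_path, file_id, new_entry) tuples, so
    # renaming foo to bar and deleting baz could be expressed as:
    #
    #     delta = [('foo', 'bar', 'foo-id', bar_entry),
    #              ('baz', None, 'baz-id', None)]
    #     repo.add_inventory_by_delta('basis-rev', delta, 'new-rev',
    #         ['basis-rev'])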

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        return self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
                    up in the inventory store.
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))
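
    # Editorial note: revision keys are 1-tuples, so a revision 'rev-3' with
    # parent ids ['rev-1', 'rev-2'] (hypothetical values) is stored under key
    # ('rev-3',) with parents (('rev-1',), ('rev-2',)).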

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present. There is no direct replacement
        method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        present.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        #
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)

    def __repr__(self):
        if self._fallback_repositories:
            return '%s(%r, fallback_repositories=%r)' % (
                self.__class__.__name__,
                self.base,
                self._fallback_repositories)
        else:
            return '%s(%r)' % (self.__class__.__name__,
                               self.base)

    def _has_same_fallbacks(self, other_repo):
        """Returns true if the repositories have the same fallbacks."""
        my_fb = self._fallback_repositories
        other_fb = other_repo._fallback_repositories
        if len(my_fb) != len(other_fb):
            return False
        for f, g in zip(my_fb, other_fb):
            if not f.has_same_location(g):
                return False
        return True

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        locked = self.is_locked()
        result = self.control_files.lock_write(token=token)
        if not locked:
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return result
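
    # Illustrative usage sketch (editorial addition; `repo` is hypothetical):
    # the locking and write-group discipline these methods expect:
    #
    #     token = repo.lock_write()
    #     try:
    #         repo.start_write_group()
    #         # ... insert data ...
    #         repo.commit_write_group()
    #     finally:
    #         repo.unlock()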

    def lock_read(self):
        locked = self.is_locked()
        self.control_files.lock_read()
        if not locked:
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
            # result['size'] = t
        return result
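
    # Illustrative example (editorial note, values hypothetical): the returned
    # dictionary can look like:
    #
    #     {'committers': 3,
    #      'firstrev': (1200000000.0, 0),
    #      'latestrev': (1240000000.0, 3600),
    #      'revisions': 42}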

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    @needs_read_lock
    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        result = self._commit_write_group()
        self._write_group = None
        return result

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.add_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy. This
            allows copying multiple heads at once. Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        # TODO: lift out to somewhere common with RemoteRepository
        # <https://bugs.edge.launchpad.net/bzr/+bug/401646>
        if (self.has_same_location(source)
            and fetch_spec is None
            and self._has_same_fallbacks(source)):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
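
    # Illustrative sketch (editorial addition; `source` and `target` are
    # hypothetical repositories): mirroring content between repositories:
    #
    #     target.fetch(source)                        # everything
    #     target.fetch(source, revision_id='rev-9')   # just rev-9's ancestry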

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        if self.control_files._lock_count == 0:
            self._inventory_entry_cache.clear()
            for repo in self._fallback_repositories:
                repo.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
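
    # Illustrative usage sketch (not part of the original source): the
    # write-group lifecycle that fetch and commit follow around data
    # insertion.  `repo` is a hypothetical write-locked Repository.
    #
    #   repo.start_write_group()
    #   try:
    #       pass  # insert texts, inventories and revisions here
    #   except:
    #       repo.abort_write_group()
    #       raise
    #   else:
    #       repo.commit_write_group()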

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result
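
    # Illustrative usage sketch (not part of the original source): probing
    # several revisions in one query; `repo` and the ids are hypothetical.
    #
    #   wanted = ['rev-1', 'rev-2', 'maybe-a-ghost']
    #   present = repo.has_revisions(wanted)
    #   missing = set(wanted) - present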

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        revs = {}
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self, record.key[0])
            text = record.get_bytes_as('fulltext')
            rev = self._serializer.read_revision_from_string(text)
            revs[record.key[0]] = rev
        return [revs[revid] for revid in revision_ids]

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    @needs_read_lock
    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)
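
    # Illustrative usage sketch (not part of the original source): listing
    # the paths added by a batch of revisions; `repo` and the revision ids
    # are hypothetical.
    #
    #   revs = repo.get_revisions(['rev-1', 'rev-2'])
    #   for delta in repo.get_deltas_for_revisions(revs):
    #       for path, file_id, kind in delta.added:
    #           print 'added', path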

    @needs_read_lock
    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r],
            specific_fileids=specific_fileids))[0]

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
            line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.

        :return: set of revisions that are parents of revision_ids which are
            not part of revision_ids themselves
        """
        parent_map = self.get_parent_map(revision_ids)
        parent_ids = set()
        map(parent_ids.update, parent_map.itervalues())
        parent_ids.difference_update(revision_ids)
        parent_ids.discard(_mod_revision.NULL_REVISION)
        return parent_ids

    def _find_parent_keys_of_revisions(self, revision_keys):
        """Similar to _find_parent_ids_of_revisions, but used with keys.

        :param revision_keys: An iterable of revision_keys.
        :return: The parents of all revision_keys that are not already in
            revision_keys
        """
        parent_map = self.revisions.get_parent_map(revision_keys)
        parent_keys = set()
        map(parent_keys.update, parent_map.itervalues())
        parent_keys.difference_update(revision_keys)
        parent_keys.discard(_mod_revision.NULL_REVISION)
        return parent_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
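
    # Illustrative usage sketch (not part of the original source):
    # extracting two file texts keyed by caller-chosen identifiers; all
    # names are hypothetical.
    #
    #   desired = [('file-id-1', 'rev-1', 'key-a'),
    #              ('file-id-2', 'rev-2', 'key-b')]
    #   for identifier, bytes_iterator in repo.iter_files_bytes(desired):
    #       text = ''.join(bytes_iterator)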

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x: candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed.
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering='unordered'):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'.
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)
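
    # Illustrative usage sketch (not part of the original source): walking
    # a few inventories without keeping them all parsed at once; `repo`
    # and the ids are hypothetical.
    #
    #   for inv in repo.iter_inventories(['rev-1', 'rev-2']):
    #       print inv.revision_id, inv.root.file_id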

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering='unordered'):
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        key_iter = iter(keys)
        next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                text_chunks[record.key] = record.get_bytes_as('chunked')
            else:
                raise errors.NoSuchRevision(self, record.key)
            while next_key in text_chunks:
                chunks = text_chunks.pop(next_key)
                yield ''.join(chunks), next_key[-1]
                try:
                    next_key = key_iter.next()
                except StopIteration:
                    # We still want to fully consume the get_record_stream,
                    # just in case it is not actually finished at this point
                    next_key = None
                    break

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid).  If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost.  Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
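
    # Illustrative usage sketch (not part of the original source):
    # resolving revno 10 from a known later (revno, revid) pair; the
    # values are hypothetical.
    #
    #   found, result = repo.get_rev_id_for_revno(10, (15, 'tip-rev-id'))
    #   if found:
    #       revid = result
    #   else:
    #       closest_revno, closest_revid = result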

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            try:
                parents = graph.get_parent_map([next_id])[next_id]
            except KeyError:
                raise errors.RevisionNotPresent(next_id, self)
            yield next_id
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]

    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        parent_map = graph.get_parent_map(keys)
        keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)

    def pack(self, hint=None):
        """Compress the data within the repository.

        This operation only makes sense for some repository types.  For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long running call it's reasonable to
        implicitly lock for the user.

        :param hint: If not supplied, the whole repository is packed.
            If supplied, the repository may use the hint parameter as a
            hint for the parts of the repository to pack.  A hint can be
            obtained from the result of commit_write_group().  Out of
            date hints are simply ignored, because concurrent operations
            can obsolete them rapidly.
        """

    def get_transaction(self):
        return self.control_files.get_transaction()

    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple(parent_revid
                    for (parent_revid,) in parent_keys)
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result
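
    # Illustrative behaviour sketch (not part of the original source):
    # ghosts are simply absent from the returned dict, and a revision with
    # no parents maps to a tuple holding only NULL_REVISION; ids are
    # hypothetical.
    #
    #   parent_map = repo.get_parent_map(['rev-2', 'a-ghost'])
    #   # e.g. {'rev-2': ('rev-1',)} - note 'a-ghost' is not a key.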

    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph.StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self, text_key_references=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain.  If None, this will be
            calculated.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')

    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
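
# Illustrative usage sketch (not part of the original source): copying a
# handful of revisions between repositories via install_revisions; the
# repositories and ids are hypothetical.
#
#   triples = [(rev, source_repo.revision_tree(rev.revision_id), None)
#              for rev in source_repo.get_revisions(wanted_ids)]
#   target_repo.lock_write()
#   try:
#       install_revisions(target_repo, triples, num_revisions=len(triples))
#   finally:
#       target_repo.unlock()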


def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should commit add an inventory, or an inventory delta to the repository.
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas ?
    # (E.g. knits, where the knit deltas can be transplanted intact.)
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # compressor.
    pack_compresses = False

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in it.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()


# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directories
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
    )
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
    )

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7',
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )

format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
        'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _walk_to_common_revisions_batch_size = 50
    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
            provided a default one will be created.
        :return: None.
        """
        from bzrlib.fetch import RepoFetcher
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        pb=pb, find_ghosts=find_ghosts)
3231

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories, e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplemented):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
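
# The set algebra above on a toy example (illustrative ids): with a source
# ancestry of {A, B, C} and a target holding {A}, possibly_present = {A},
# actually_present = {A}, so required_revisions = {B, C} - exactly the
# revisions a push must send.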


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)

class InterDifferingSerializer(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        # Only use this code path for local source and target.  IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True

    def _get_delta_for_revision(self, tree, parent_ids, basis_id, cache):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        possible_trees = [(parent_id, cache[parent_id])
                          for parent_id in parent_ids
                          if parent_id in cache]
        if len(possible_trees) == 0:
            # There either aren't any parents, or the parents aren't in the
            # cache, so just use the last converted tree
            possible_trees.append((basis_id, cache[basis_id]))
        deltas = []
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
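
    # deltas is sorted by (len(delta), basis_id, ...), so deltas[0][1:] picks
    # the basis producing the smallest delta; e.g. with candidates
    # [(3, 'rev-a', delta_a), (12, 'rev-b', delta_b)] the method returns
    # ('rev-a', delta_a) (revision ids here are illustrative).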

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        for tree in self.source.revision_trees(revision_ids):
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           basis_id, cache)
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Find text entries that need to be copied
            for old_path, new_path, file_id, entry in delta:
                if new_path is not None:
                    if not new_path:
                        # This is the root entry
                        if not self.target.supports_rich_root():
                            # The target doesn't support rich root, so we don't
                            # copy
                            continue
                        if self._converting_to_rich_root:
                            # This can't be copied normally, we have to insert
                            # it specially
                            root_keys_to_create.add((file_id, entry.revision))
                            continue
                    text_keys.add((file_id, entry.revision))
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            from bzrlib.fetch import _new_root_data_stream
            root_stream = _new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert.  We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            for parent_tree in self.source.revision_trees(parent_ids):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, basis_id, cache)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id
    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressBar
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))
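
    # Each batch above runs in its own write group, so an interrupted
    # conversion loses at most batch_size (100) revisions of work, and any
    # commit_write_group() hints are saved up for a single final pack().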

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk though all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0
    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if its a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree

InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)


_unescape_map = {
    'apos': "'",
    'quot': '"',
    'amp': '&',
    'lt': '<',
    'gt': '>',
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
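
# Illustrative behaviour (example values, not a doctest run):
#   _unescape_xml('a &amp; b')  -> 'a & b'
#   _unescape_xml('x &#233; y') -> 'x \xc3\xa9 y' (numeric refs become UTF-8)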


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
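
# Worked example (illustrative graph): {'b': ('a', 'ghost'), 'a': (NULL,)},
# with NULL being NULL_REVISION, becomes {'b': ('a',), 'a': ()} - parents
# that are not themselves keys of the graph (ghosts and null:) are dropped.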


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format, is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()
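
    # A typical driving loop for this API (sketch only; 'sink', 'source',
    # 'stream' and 'src_format' are placeholders, not bzrlib globals):
    #
    #   tokens, missing = sink.insert_stream(stream, src_format, [])
    #   while missing:
    #       stream = source.get_stream_for_missing_keys(missing)
    #       tokens, missing = sink.insert_stream(stream, src_format, tokens)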

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack.  This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport).  This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                # conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what we are
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()
    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaSerializer()
            parse_result = deserialiser.parse_text_bytes(inventory_delta_bytes)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            # Make sure the delta is compatible with the target
            if rich_root and not target_rich_root:
                raise errors.IncompatibleRevision(self.target_repo._format)
            if tree_refs and not target_tree_refs:
                raise errors.IncompatibleRevision(self.target_repo._format)
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv
    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)
    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be a InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            import bzrlib.fetch
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []
    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = list(graph.iter_topo_order(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories references
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventory-deltas':
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # As with the original stream, we may need to generate root
                # texts for the inventories we're about to stream.
                for _ in self._generate_root_texts(revs):
                    yield _
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink.  (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records.  The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream
    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)
    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The source is using CHKs, but the target either doesn't or has a
        # different serializer.  The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted.  That means inventory deltas (if the remote is <1.18,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventory-deltas',
            self._stream_invs_as_deltas(revision_ids,
                delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for stuff like
            filling in missing parents, etc.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        # XXX: ideally these flags would be per-revision, not per-repo (e.g.
        # streaming a non-rich-root revision out of a rich-root repo back into
        # a non-rich-root repo ought to be allowed)
        format = from_repo._format
        flags = (format.rich_root_data, format.supports_tree_reference)
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).inventory
        serializer = inventory_delta.InventoryDeltaSerializer()
        serializer.require_flags(*flags)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against.  Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)
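
    # Each delta serialized above is a list of (old_path, new_path, file_id,
    # entry) tuples - the same shape add_inventory_by_delta() consumes; an
    # add, for instance, appears as (None, 'dir/file.txt', 'file-id', entry)
    # (illustrative values).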


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered.  Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present.  When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present.  When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration: