# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import time

from bzrlib import (
    bzrdir,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    osutils,
    revision as _mod_revision,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib import registry
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))
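
    # Illustrative note (not part of the original module): '\r' is the
    # character the XML serializers cannot round trip, so a revprops dict
    # such as {'branch-nick': 'a\rb'} would raise ValueError here, while
    # {'branch-nick': u'trunk'} is accepted.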

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
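
    # A sketch of the full builder lifecycle, for illustration only (the
    # caller-side names below are assumptions, not code from this module):
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   try:
    #       for path, ie in entries_to_commit:
    #           builder.record_entry_contents(ie, parent_invs, path, tree,
    #               tree.path_content_summary(path))
    #       builder.finish_inventory()
    #       rev_id = builder.commit('commit message')
    #   except:
    #       builder.abort()
    #       raise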

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            self.inv_sha1 = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents
                )
        return self._new_revision_id

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in root.revision is always
        # the new revision_id.
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
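
    # Sketch of accumulating a basis delta (illustrative, with hypothetical
    # paths and ids; not code from this module):
    #
    #   builder.will_record_deletes()
    #   builder.record_delete('doc/old.txt', 'old-file-id')
    #   ... record_entry_contents() calls for the changed entries ...
    #   delta = builder.get_basis_delta()
    #
    # Each delta item has the shape (old_path, new_path, file_id, entry);
    # the delete above appears as ('doc/old.txt', None, 'old-file-id', None).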

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            # we don't need to commit this, because the caller already
            # determined that an existing revision of this file is
            # appropriate. If it's not being considered for committing then
            # it and all its parents to the root must be unaltered so
            # no-change against the basis.
            if ie.revision == self._new_revision_id:
                raise AssertionError("Impossible situation, a skipped "
                    "inventory entry (%r) claims to be modified in this "
                    "commit (%r)." % (ie, self._new_revision_id))
            return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory delta's
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        merged_ids = {}
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        changes = {}
        # {file_id -> revision_id -> inventory entry, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_inv = revtrees[0].inventory
        else:
            basis_inv = self.repository.revision_tree(
                _mod_revision.NULL_REVISION).inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision:basis_entry,
                                # this parent
                                change[3].revision:change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision:change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
642
# Extend the changes dict with synthetic changes to record merges of
644
for file_id in unchanged_merged:
645
# Record a merged version of these items that did not change vs the
646
# basis. This can be either identical parallel changes, or a revert
647
# of a specific file after a merge. The recorded content will be
648
# that of the current tree (which is the same as the basis), but
649
# the per-file graph will reflect a merge.
650
# NB:XXX: We are reconstructing path information we had, this
651
# should be preserved instead.
652
# inv delta change: (file_id, (path_in_source, path_in_target),
653
# changed_content, versioned, parent, name, kind,
656
basis_entry = basis_inv[file_id]
657
except errors.NoSuchId:
658
# a change from basis->some_parents but file_id isn't in basis
659
# so was new in the merge, which means it must have changed
660
# from basis -> current, and as it hasn't the add was reverted
661
# by the user. So we discard this change.
665
(basis_inv.id2path(file_id), tree.id2path(file_id)),
667
(basis_entry.parent_id, basis_entry.parent_id),
668
(basis_entry.name, basis_entry.name),
669
(basis_entry.kind, basis_entry.kind),
670
(basis_entry.executable, basis_entry.executable))
671
changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]: # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against < all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant
                        # iter_changes is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts the content of a file
                    # after iter_changes examines and decides it has changed,
                    # we will unconditionally record a new version even if some
                    # other process reverts it while commit is running (with
                    # the revert happening after iter_changes did its
                    # examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                            # Check the file length, content hash after reading
                            # the file.
                            nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    # Wants a path hint?
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        if len(inv_delta):
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
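
    # A sketch of driving record_iter_changes (illustrative; names like
    # basis_tree and basis_rev_id are assumptions, not part of this module):
    #
    #   builder.will_record_deletes()
    #   for file_id, relpath, fs_hash in builder.record_iter_changes(
    #           tree, basis_rev_id, tree.iter_changes(basis_tree)):
    #       pass  # fs_hash may be fed back into the tree's stat cache
    #   builder.finish_inventory()
    #   rev_id = builder.commit('commit message')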

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
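
    # For example (illustrative ids): with parents ['rev-1', 'rev-2'] and
    # file_id 'file-x', parent_keys is (('file-x', 'rev-1'),
    # ('file-x', 'rev-2')) and the new text is stored under the key
    # ('file-x', self._new_revision_id).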


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


######################################################################
# Repositories


class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories, texts and chk_bytes attributes) and a
    Transport, which respectively provide byte storage and a means to access
    the (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
    byte string made up of a hash identifier and a hash value.
    We use this interface because it allows low friction with the underlying
    code that implements disk indices, network encoding and other parts of
    bzrlib.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures. The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
        any data the repository chooses to store or have indexed by its hash.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """
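
    # Key shapes, for illustration (hypothetical ids, not from this module):
    #   revisions / inventories / signatures:  ('rev-id-1',)
    #   texts:                                 ('file-id-7', 'rev-id-1')
    #   chk_bytes:                             ('sha1:1e8f...',)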

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )

    def abort_write_group(self, suppress_errors=False):
        """Abort the contents accrued within the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate.  Defaults to False.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            if suppress_errors:
                mutter(
                '(suppressed) mismatched lock context and write group. %r, %r',
                self._write_group, self.get_transaction())
                return
            raise errors.BzrError(
                'mismatched lock context and write group. %r, %r' %
                (self._write_group, self.get_transaction()))
        try:
            self._abort_write_group()
        except Exception, exc:
            self._write_group = None
            if not suppress_errors:
                raise
            mutter('abort_write_group failed')
            log_exception_quietly()
            note('bzr: ERROR (ignored): %s', exc)
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network
        operations may not be possible or may error and if possible should not
        be attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        if self.is_locked():
            # This repository will call fallback.unlock() when we transition to
            # the unlocked state, so we make sure to increment the lock count
            repository.lock_read()
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)
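
    # Illustrative stacking sketch (assumed setup, not original code): after
    #
    #   repo_a.lock_read()
    #   repo_a.add_fallback_repository(repo_b)
    #
    # lookups in repo_a's texts, inventories, revisions and signatures fall
    # through to repo_b for keys not present locally.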

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents, basis_inv=None, propagate_caches=False):
        """Add a new inventory expressed as a delta against another revision.

        See the inventory developers documentation for the theory behind
        inventory deltas.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.
        :param basis_inv: The basis inventory if it is already known,
            otherwise None.
        :param propagate_caches: If True, the caches for this inventory are
            copied to and updated for the result if possible.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            if basis_inv is None:
                basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
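
    # A minimal inventory delta, for illustration (hypothetical ids and
    # entries; not code from this module):
    #
    #   delta = [
    #       (None, 'new-file.txt', 'file-id-9', new_file_entry),  # add
    #       ('old.txt', None, 'file-id-3', None),                 # delete
    #   ]
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       basis_revision_id, delta, new_revision_id, parents)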

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        return self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. if None, it will be looked
                    up in the inventory store.
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present. There is no direct replacement
        method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        present.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        locked = self.is_locked()
        result = self.control_files.lock_write(token=token)
        if not locked:
            for repo in self._fallback_repositories:
                # Writes don't affect fallback repos
                repo.lock_read()
            self._refresh_data()
        return result

    def lock_read(self):
        locked = self.is_locked()
        self.control_files.lock_read()
        if not locked:
            for repo in self._fallback_repositories:
                repo.lock_read()
            self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
            # result['size'] = t
        return result
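
    # Shape of the returned dict, for illustration (fabricated values):
    #
    #   {'committers': 3,
    #    'firstrev': (1200000000.0, 0),
    #    'latestrev': (1240000000.0, 3600),
    #    'revisions': 1234}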

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    @needs_read_lock
    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        result = self._commit_write_group()
        self._write_group = None
        return result

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated. Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server. This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories. But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.add_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target. Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy. This
            allows copying multiple heads at once. Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        if self.has_same_location(source) and fetch_spec is None:
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
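
    # Example call, illustrative only (hypothetical repositories and id):
    #
    #   target_repo.fetch(source_repo, revision_id='rev-id-1')
    #
    # copies 'rev-id-1' plus everything it requires that target_repo lacks.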

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        if self.control_files._lock_count == 0:
            self._inventory_entry_cache.clear()
            for repo in self._fallback_repositories:
                repo.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo
1603
    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
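    # A sketch of the write-group protocol described above, assuming `repo`
    # is already write-locked; aborting on failure mirrors what
    # install_revisions() later in this module does:
    #
    #     repo.start_write_group()
    #     try:
    #         ...insert texts, inventories and revisions...
    #     except:
    #         repo.abort_write_group()
    #         raise
    #     else:
    #         repo.commit_write_group()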
    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    @needs_read_lock
    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo
    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed.
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)
    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result
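    # Sketch of the probe semantics (ids are placeholders): absent revisions
    # are simply dropped from the result set rather than raising.
    #
    #     repo.has_revisions(['rev-1', 'ghost-1'])
    #     # -> set(['rev-1']) when 'ghost-1' is not stored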
    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)
    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        revs = {}
        for record in stream:
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self, record.key[0])
            text = record.get_bytes_as('fulltext')
            rev = self._serializer.read_revision_from_string(text)
            revs[record.key[0]] = rev
        return [revs[revid] for revid in revision_ids]
    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        # TODO: this can't just be replaced by:
        # return self._serializer.write_revision_to_string(
        #     self.get_revision(revision_id))
        # as cStringIO preserves the encoding unlike write_revision_to_string
        # or some other call down the path.
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()
    def get_deltas_for_revisions(self, revisions, specific_fileids=None):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        # Get the revision-ids of interest
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])

        # Get the matching filtered trees. Note that it's more
        # efficient to pass filtered trees to changes_from() rather
        # than doing the filtering afterwards. changes_from() could
        # arguably do the filtering itself but it's path-based, not
        # file-id based, so filtering before or afterwards is
        # currently easier.
        if specific_fileids is None:
            trees = dict((t.get_revision_id(), t) for
                t in self.revision_trees(required_trees))
        else:
            trees = dict((t.get_revision_id(), t) for
                t in self._filtered_revision_trees(required_trees,
                specific_fileids))

        # Calculate the deltas
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)
    @needs_read_lock
    def get_revision_delta(self, revision_id, specific_fileids=None):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.

        :param specific_fileids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r],
            specific_fileids=specific_fileids))[0]
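    # Sketch: listing files added by a single revision via its delta.
    # `repo` and 'rev-1' are placeholders; TreeDelta.added holds
    # (path, file_id, kind) tuples.
    #
    #     delta = repo.get_revision_delta('rev-1')
    #     for path, file_id, kind in delta.added:
    #         print 'added', path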
    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))
    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()
    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result
    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid
    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result
    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.

        :return: set of revisions that are parents of revision_ids which are
            not part of revision_ids themselves
        """
        parent_map = self.get_parent_map(revision_ids)
        parent_ids = set()
        map(parent_ids.update, parent_map.itervalues())
        parent_ids.difference_update(revision_ids)
        parent_ids.discard(_mod_revision.NULL_REVISION)
        return parent_ids

    def _find_parent_keys_of_revisions(self, revision_keys):
        """Similar to _find_parent_ids_of_revisions, but used with keys.

        :param revision_keys: An iterable of revision_keys.
        :return: The parents of all revision_keys that are not already in
            revision_keys
        """
        parent_map = self.revisions.get_parent_map(revision_keys)
        parent_keys = set()
        map(parent_keys.update, parent_map.itervalues())
        parent_keys.difference_update(revision_keys)
        parent_keys.discard(_mod_revision.NULL_REVISION)
        return parent_keys
    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()
    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')
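    # Sketch of driving iter_files_bytes(): the identifier is opaque to the
    # repository, so a plain list index works.  Names are placeholders.
    #
    #     wanted = [('file-id-1', 'rev-1', 0), ('file-id-2', 'rev-1', 1)]
    #     for identifier, bytes_iter in repo.iter_files_bytes(wanted):
    #         text = ''.join(bytes_iter)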
    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()
    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index
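    # The resulting index maps each text key to its parent text keys, e.g.
    # (values purely illustrative):
    #
    #     {('file-id-1', 'rev-2'): [('file-id-1', 'rev-1')],
    #      ('file-id-1', 'rev-1'): [NULL_REVISION]}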
    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator.  Ideally the caching would be done in fetch.py I think.  Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)
    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering='unordered'):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'.
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids, ordering)
    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids, ordering='unordered'):
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                text_chunks[record.key] = record.get_bytes_as('chunked')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            chunks = text_chunks.pop(key)
            yield ''.join(chunks), key[-1]
    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num
    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1
    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid).  If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost.  Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])
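    # Worked example of the arithmetic above (values are illustrative): with
    # known_pair=(10, 'rev-10') and revno=7, distance_from_known is 3, so
    # three steps of left-hand history are appended after 'rev-10' and the
    # result is (True, partial_history[-1]) == (True, 'rev-7').  If only two
    # steps of history exist, earliest_revno = 10 - 3 + 1 = 8 and the result
    # is (False, (8, 'rev-8')).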
    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history.

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            try:
                parents = graph.get_parent_map([next_id])[next_id]
            except KeyError:
                raise errors.RevisionNotPresent(next_id, self)
            yield next_id
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]
    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)
    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """
    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
            a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
            a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
            so that only those file-ids, their parents and their
            children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)
    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        if not topo_sorted:
            return keys
        parent_map = graph.get_parent_map(keys)
        keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)
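    # Sketch of the returned shape (ids are placeholders): for a linear
    # history rev-1..rev-3, get_ancestry('rev-3') yields
    # [None, 'rev-1', 'rev-2', 'rev-3'], the leading None marking the origin.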
    def pack(self, hint=None):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long-running call and it's reasonable
        to implicitly lock for the user.

        :param hint: If not supplied, the whole repository is packed.
            If supplied, the repository may use the hint parameter as a
            hint for the parts of the repository to pack. A hint can be
            obtained from the result of commit_write_group(). Out of
            date hints are simply ignored, because concurrent operations
            can obsolete them rapidly.
        """

    def get_transaction(self):
        return self.control_files.get_transaction()
    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id ,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple(parent_revid
                    for (parent_revid,) in parent_keys)
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result
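    # Sketch of the mapping contract (ids are placeholders): ghosts are
    # absent from the result, and a root revision maps to the null parent.
    #
    #     repo.get_parent_map(['rev-2', 'ghost'])
    #     # -> {'rev-2': ('rev-1',)}
    #     repo.get_parent_map(['rev-1'])
    #     # -> {'rev-1': ('null:',)}   # NULL_REVISION for a root revision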
    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format."""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph.StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)
    def _get_versioned_file_checker(self, text_key_references=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            calculated.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)
    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')
    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
            will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data
    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)
# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')
def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
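# Sketch of a caller's use of install_revisions(), assuming `repo` is open
# for writing and `rev`/`tree` come from another repository:
#
#     repo.lock_write()
#     try:
#         install_revisions(repo, [(rev, tree, None)], num_revisions=1)
#     finally:
#         repo.unlock()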
def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)
class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')
class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
#####################################################################
# Repository Formats


class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """
    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should commit add an inventory, or an inventory delta to the repository.
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas ?
    # (E.g. knits, where the knit deltas can be transplanted intact.)
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # compressor.
    pack_compresses = False

    def __repr__(self):
        return "<%s>" % self.__class__.__name__
    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')
    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)
    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)
    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir in which to put the new repository.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)
class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
            'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files
    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()
# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directories
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
)
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
)
# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7',
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )

format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
        'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )
format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )
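# Sketch: resolving a format object from a registered format string (the
# string below is one of the registrations above):
#
#     factory = format_registry.get(
#         'Bazaar repository format 2a (needs bzr 1.16 or later)\n')
#
# The smart server resolves formats the same way, looking network_name()
# strings up in network_format_registry.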
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _walk_to_common_revisions_batch_size = 50
    _optimisers = []
    """The available optimised InterRepository types."""
    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.
        :return: None.
        """
        from bzrlib.fetch import RepoFetcher
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        fetch_spec=fetch_spec,
                        pb=pb, find_ghosts=find_ghosts)
    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()
    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories, e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


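# A minimal sketch (hypothetical helper, not part of bzrlib) showing the
# intended entry point: InterRepository.get() picks the best registered
# optimiser for a (source, target) pair, and the caller then uses its methods.
def _example_missing_revision_ids(source_repo, target_repo):
    inter = InterRepository.get(source_repo, target_repo)
    # Returns a SearchResult describing revisions present in source_repo
    # but absent from target_repo.
    return inter.search_missing_revision_ids(find_ghosts=True)

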
class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplementedError):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)


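# A minimal sketch (hypothetical, not part of bzrlib) of how a plugin could
# add its own optimiser: subclass an InterRepository variant, implement
# is_compatible(), and register it so InterRepository.get() can select it.
def _example_register_optimiser():
    class _InterExampleRepo(InterSameDataRepository):
        @staticmethod
        def is_compatible(source, target):
            # Placeholder predicate; a real optimiser would inspect the
            # repository formats here.
            return False
    InterRepository.register_optimiser(_InterExampleRepo)
    # Optimisers can also be removed again:
    InterRepository.unregister_optimiser(_InterExampleRepo)

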
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of the repository, giving feedback via pb.

        :param repo: The repository to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)


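# Sketch of intended use (hypothetical driver code, not part of bzrlib):
# wrap the conversion in a progress bar and hand the repository over.
def _example_convert_repository(repo, target_format):
    pb = ui.ui_factory.nested_progress_bar()
    try:
        CopyConverter(target_format).convert(repo, pb)
    finally:
        pb.finished()

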
_unescape_map = {
    'apos': "'",
    'quot': '"',
    'amp': '&',
    'lt': '<',
    'gt': '>',
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)


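# Quick illustration (hypothetical helper): both named entities and numeric
# character references are expanded, e.g. '&amp;' -> '&' and '&#65;' -> 'A'.
def _example_unescape():
    return _unescape_xml('a &amp; b &#65;')  # returns 'a & b A'

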
class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_versions = len(self.text_index)
        if progress_bar is not None:
            progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys


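# Minimal sketch (hypothetical helper) of how the checker is driven, e.g.
# from a consistency check: compare the parents stored for each text with
# the parents the inventories imply.
def _example_check_text_parents(repository):
    checker = _VersionedFileChecker(repository)
    wrong_parents, dangling = checker.check_file_version_parents(
        repository.texts)
    return wrong_parents, dangling

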
def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph


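# Tiny illustration (hypothetical helper): ghosts (parents missing from the
# mapping) and the null revision are dropped, leaving a self-contained dict.
def _example_strip_ghosts():
    graph = {'rev-2': ('rev-1', 'ghost-id'),
             'rev-1': (_mod_revision.NULL_REVISION,)}
    # Returns {'rev-2': ('rev-1',), 'rev-1': ()}
    return _strip_NULL_ghosts(graph)

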
class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys additional
            items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format, is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack. This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport). This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        delta_deserializer = inventory_delta.InventoryDeltaSerializer()
        for substream_type, substream in stream:
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer,
                        delta_deserializer.parse_text_bytes)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fallback to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
                self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            if record.storage_kind == 'inventory-delta':
                # Insert the delta directly
                delta_tuple = record.get_bytes_as('inventory-delta')
                basis_id, new_id, inv_delta, format_flags = delta_tuple
                # Make sure the delta is compatible with the target
                if format_flags[0] and not target_rich_root:
                    raise errors.IncompatibleRevision(self.target_repo._format)
                if format_flags[1] and not target_tree_refs:
                    raise errors.IncompatibleRevision(self.target_repo._format)
                revision_id = new_id[0]
                parents = [key[0] for key in record.parents]
                self.target_repo.add_inventory_by_delta(
                    basis_id, inv_delta, revision_id, parents)
            else:
                # It's not a delta, so it must be a fulltext in the source
                # serializer's format.
                bytes = record.get_bytes_as('fulltext')
                revision_id = record.key[0]
                inv = serializer.read_inventory_from_string(bytes, revision_id)
                parents = [key[0] for key in record.parents]
                self.target_repo.add_inventory(revision_id, inv, parents)
                # No need to keep holding this full inv in memory when the
                # rest of the substream is likely to be all deltas.
                del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


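# Sketch (hypothetical driver, not part of bzrlib) of the insert/resume
# protocol: keep feeding streams until the sink stops reporting missing keys.
def _example_drive_sink(sink, stream_factory, src_format):
    # stream_factory is an assumed callable mapping a set of missing keys
    # (or None for the initial request) to a record stream.
    tokens, missing = sink.insert_stream(stream_factory(None), src_format, [])
    while missing:
        tokens, missing = sink.insert_stream(
            stream_factory(missing), src_format, tokens)
    sink.finished()
    return tokens

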
class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now.
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            import bzrlib.fetch
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = list(graph.iter_topo_order(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # As with the original stream, we may need to generate root
                # texts for the inventories we're about to stream.
                for _ in self._generate_root_texts(revs):
                    yield _
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink. (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif not from_format.supports_chks:
            # Source repository doesn't support chks. So we can transmit the
            # inventories 'as-is' and either they are just accepted on the
            # target, or the Sink will properly convert it.
            # (XXX: this assumes that all non-chk formats are understood as-is
            # by any Sink, but that presumably isn't true for foreign repo
            # formats added by bzr-svn etc?)
            return self._get_simple_inventory_stream(revision_ids,
                missing=missing)
        else:
            # Make chk->non-chk (and chk with different serializers) fetch:
            # copy the inventories as (format-neutral) inventory deltas.
            return self._get_convertable_inventory_stream(revision_ids,
                fulltexts=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids, fulltexts=False):
        # The source is using CHKs, but the target either doesn't, or it has a
        # different serializer. The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted. That means inventory deltas (if the remote is <1.18,
        # RemoteStreamSink will fallback to VFS to insert the deltas).
        yield ('inventories',
            self._stream_invs_as_deltas(revision_ids, fulltexts=fulltexts))

    def _stream_invs_as_deltas(self, revision_ids, fulltexts=False):
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method.
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        # XXX: ideally these flags would be per-revision, not per-repo (e.g.
        # streaming a non-rich-root revision out of a rich-root repo back into
        # a non-rich-root repo ought to be allowed)
        format = from_repo._format
        flags = (format.rich_root_data, format.supports_tree_reference)
        invs_sent_so_far = set([_mod_revision.NULL_REVISION])
        for inv in inventories:
            key = (inv.revision_id,)
            parents = parent_map.get(key, ())
            if fulltexts or parents == ():
                # Either the caller asked for fulltexts, or there is no
                # parent, so stream as a delta from null:.
                basis_id = _mod_revision.NULL_REVISION
                parent_inv = Inventory(None)
                delta = inv._make_delta(parent_inv)
            else:
                # Make a delta against each parent so that we can find the
                # smallest.
                best_delta = None
                parent_ids = [parent_key[0] for parent_key in parents]
                parent_ids.append(_mod_revision.NULL_REVISION)
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis,
                        # so we can't use it as a delta base.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = Inventory(None)
                    else:
                        parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (best_delta is None or
                            len(best_delta) > len(candidate_delta)):
                        best_delta = candidate_delta
                        basis_id = parent_id
                delta = best_delta
            invs_sent_so_far.add(inv.revision_id)
            yield versionedfile.InventoryDeltaContentFactory(
                key, parents, None, delta, basis_id, flags)


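# A minimal sketch (hypothetical helper) wiring a StreamSource to a
# StreamSink; this is roughly what the fetch machinery does once it has a
# search result describing what to copy. Assumes both repositories are
# already locked appropriately.
def _example_stream_between(source_repo, target_repo, search):
    source = source_repo._get_source(target_repo._format)
    sink = target_repo._get_sink()
    stream = source.get_stream(search)
    resume_tokens, missing_keys = sink.insert_stream(
        stream, source_repo._format, [])
    if missing_keys:
        sink.insert_stream(source.get_stream_for_missing_keys(missing_keys),
            source_repo._format, resume_tokens)
    sink.finished()

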
def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index.

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered.  Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present.  When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present.  When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                    len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return


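# Usage sketch (hypothetical helper): grow a cached mainline history until
# the revision at index 10 is known, starting from a branch tip.
def _example_extend_partial_history(repo, tip_revision_id):
    partial_history = [tip_revision_id]
    _iter_for_revno(repo, partial_history, stop_index=10)
    return partial_history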