1
# Copyright (C) 2005, 2006, 2007 Canonical Ltd
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
# GNU General Public License for more details.
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17
from cStringIO import StringIO
19
from bzrlib.lazy_import import lazy_import
20
lazy_import(globals(), """
40
revision as _mod_revision,
46
from bzrlib.bundle import serializer
47
from bzrlib.revisiontree import RevisionTree
48
from bzrlib.store.versioned import VersionedFileStore
49
from bzrlib.store.text import TextStore
50
from bzrlib.testament import Testament
51
from bzrlib.util import bencode
54
from bzrlib.decorators import needs_read_lock, needs_write_lock
55
from bzrlib.inter import InterObject
56
from bzrlib.inventory import Inventory, InventoryDirectory, ROOT_ID
57
from bzrlib.symbol_versioning import (
60
from bzrlib.trace import mutter, mutter_callsite, note, warning
63
# Old formats display a warning, but only once
64
_deprecation_warning_done = False
67
class CommitBuilder(object):
68
"""Provides an interface to build up a commit.
70
This allows describing a tree to be committed without needing to
71
know the internals of the format of the repository.
74
# all clients should supply tree roots.
75
record_root_entry = True
76
# the default CommitBuilder does not manage trees whose root is versioned.
77
_versioned_root = False
79
def __init__(self, repository, parents, config, timestamp=None,
80
timezone=None, committer=None, revprops=None,
82
"""Initiate a CommitBuilder.
84
:param repository: Repository to commit to.
85
:param parents: Revision ids of the parents of the new revision.
86
:param config: Configuration to use.
87
:param timestamp: Optional timestamp recorded for commit.
88
:param timezone: Optional timezone for timestamp.
89
:param committer: Optional committer to set for commit.
90
:param revprops: Optional dictionary of revision properties.
91
:param revision_id: Optional revision id.
96
self._committer = self._config.username()
98
assert isinstance(committer, basestring), type(committer)
99
self._committer = committer
101
self.new_inventory = Inventory(None)
102
self._new_revision_id = revision_id
103
self.parents = parents
104
self.repository = repository
107
if revprops is not None:
108
self._revprops.update(revprops)
110
if timestamp is None:
111
timestamp = time.time()
112
# Restrict resolution to 1ms
113
self._timestamp = round(timestamp, 3)
116
self._timezone = osutils.local_time_offset()
118
self._timezone = int(timezone)
120
self._generate_revision_if_needed()
121
self.__heads = graph.HeadsCache(repository.get_graph()).heads
123
def commit(self, message):
124
"""Make the actual commit.
126
:return: The revision id of the recorded revision.
128
rev = _mod_revision.Revision(
129
timestamp=self._timestamp,
130
timezone=self._timezone,
131
committer=self._committer,
133
inventory_sha1=self.inv_sha1,
134
revision_id=self._new_revision_id,
135
properties=self._revprops)
136
rev.parent_ids = self.parents
137
self.repository.add_revision(self._new_revision_id, rev,
138
self.new_inventory, self._config)
139
self.repository.commit_write_group()
140
return self._new_revision_id
143
"""Abort the commit that is being built.
145
self.repository.abort_write_group()
147
def revision_tree(self):
148
"""Return the tree that was just committed.
150
After calling commit() this can be called to get a RevisionTree
151
representing the newly committed tree. This is preferred to
152
calling Repository.revision_tree() because that may require
153
deserializing the inventory, while we already have a copy in
156
return RevisionTree(self.repository, self.new_inventory,
157
self._new_revision_id)
159
def finish_inventory(self):
160
"""Tell the builder that the inventory is finished."""
161
if self.new_inventory.root is None:
162
raise AssertionError('Root entry should be supplied to'
' record_entry_contents, as of bzr 0.10.')
165
self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
166
self.new_inventory.revision_id = self._new_revision_id
167
self.inv_sha1 = self.repository.add_inventory(
168
self._new_revision_id,
173
def _gen_revision_id(self):
174
"""Return new revision-id."""
175
return generate_ids.gen_revision_id(self._config.username(),
178
def _generate_revision_if_needed(self):
179
"""Create a revision id if None was supplied.
181
If the repository can not support user-specified revision ids
182
they should override this function and raise CannotSetRevisionId
183
if _new_revision_id is not None.
185
:raises: CannotSetRevisionId
187
if self._new_revision_id is None:
188
self._new_revision_id = self._gen_revision_id()
189
self.random_revid = True
191
self.random_revid = False
193
def _heads(self, file_id, revision_ids):
194
"""Calculate the graph heads for revision_ids in the graph of file_id.
196
This can use either a per-file graph or a global revision graph as we
197
have an identity relationship between the two graphs.
199
return self.__heads(revision_ids)
201
def _check_root(self, ie, parent_invs, tree):
202
"""Helper for record_entry_contents.
204
:param ie: An entry being added.
205
:param parent_invs: The inventories of the parent revisions of the
207
:param tree: The tree that is being committed.
209
# In this revision format, root entries have no knit or weave. When
# serializing out to disk and back in, root.revision is always
212
ie.revision = self._new_revision_id
214
def _get_delta(self, ie, basis_inv, path):
215
"""Get a delta against the basis inventory for ie."""
216
if ie.file_id not in basis_inv:
218
return (None, path, ie.file_id, ie)
219
elif ie != basis_inv[ie.file_id]:
221
# TODO: avoid this id2path call.
222
return (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
227
def record_entry_contents(self, ie, parent_invs, path, tree,
229
"""Record the content of ie from tree into the commit if needed.
231
Side effect: sets ie.revision when unchanged
233
:param ie: An inventory entry present in the commit.
234
:param parent_invs: The inventories of the parent revisions of the
236
:param path: The path the entry is at in the tree.
237
:param tree: The tree which contains this entry and should be used to
239
:param content_summary: Summary data from the tree about the paths
240
content - stat, length, exec, sha/link target. This is only
241
accessed when the entry has a revision of None - that is when it is
242
a candidate to commit.
243
:return: A tuple (change_delta, version_recorded). change_delta is
244
an inventory_delta change for this entry against the basis tree of
245
the commit, or None if no change occurred against the basis tree.
246
version_recorded is True if a new version of the entry has been
247
recorded. For instance, committing a merge where a file was only
248
changed on the other side will return (delta, False).
250
if self.new_inventory.root is None:
251
if ie.parent_id is not None:
252
raise errors.RootMissing()
253
self._check_root(ie, parent_invs, tree)
254
if ie.revision is None:
255
kind = content_summary[0]
257
# ie is carried over from a prior commit
259
# XXX: repository specific check for nested tree support goes here - if
260
# the repo doesn't want nested trees we skip it ?
261
if (kind == 'tree-reference' and
262
not self.repository._format.supports_tree_reference):
263
# mismatch between commit builder logic and repository:
264
# this needs the entry creation pushed down into the builder.
265
raise NotImplementedError('Missing repository subtree support.')
266
self.new_inventory.add(ie)
268
# TODO: slow, take it out of the inner loop.
270
basis_inv = parent_invs[0]
272
basis_inv = Inventory(root_id=None)
274
# ie.revision is always None if the InventoryEntry is considered
275
# for committing. We may record the previous parent's revision if the
276
# content is actually unchanged against a sole head.
277
if ie.revision is not None:
278
if not self._versioned_root and path == '':
279
# repositories that do not version the root set the root's
280
# revision to the new commit even when no change occurs, and
281
# this masks when a change may have occurred against the basis,
282
# so calculate if one happened.
283
if ie.file_id in basis_inv:
284
delta = (basis_inv.id2path(ie.file_id), path,
288
delta = (None, path, ie.file_id, ie)
291
# we don't need to commit this, because the caller already
292
# determined that an existing revision of this file is
294
return None, (ie.revision == self._new_revision_id)
295
# XXX: Friction: parent_candidates should return a list not a dict
296
# so that we don't have to walk the inventories again.
297
parent_candiate_entries = ie.parent_candidates(parent_invs)
298
head_set = self._heads(ie.file_id, parent_candiate_entries.keys())
300
for inv in parent_invs:
301
if ie.file_id in inv:
302
old_rev = inv[ie.file_id].revision
303
if old_rev in head_set:
304
heads.append(inv[ie.file_id].revision)
305
head_set.remove(inv[ie.file_id].revision)
308
# now we check to see if we need to write a new record to the
310
# We write a new entry unless there is one head to the ancestors, and
311
# the kind-derived content is unchanged.
313
# Cheapest check first: no ancestors, or more than one head in the
# ancestors, we write a new node.
318
# There is a single head, look it up for comparison
319
parent_entry = parent_candiate_entries[heads[0]]
320
# if the non-content specific data has changed, we'll be writing a
322
if (parent_entry.parent_id != ie.parent_id or
323
parent_entry.name != ie.name):
325
# now we need to do content specific checks:
327
# if the kind changed the content obviously has
328
if kind != parent_entry.kind:
331
assert content_summary[2] is not None, \
332
"Files must not have executable = None"
334
if (# if the file length changed we have to store:
335
parent_entry.text_size != content_summary[1] or
336
# if the exec bit has changed we have to store:
337
parent_entry.executable != content_summary[2]):
339
elif parent_entry.text_sha1 == content_summary[3]:
340
# all meta and content is unchanged (using a hash cache
341
# hit to check the sha)
342
ie.revision = parent_entry.revision
343
ie.text_size = parent_entry.text_size
344
ie.text_sha1 = parent_entry.text_sha1
345
ie.executable = parent_entry.executable
346
return self._get_delta(ie, basis_inv, path), False
348
# Either there is only a hash change (no hash cache entry,
# or same size content change), or there is no change on
# Provide the parent's hash to the store layer, so that if the
# content is unchanged we will not store a new node.
353
nostore_sha = parent_entry.text_sha1
355
# We want to record a new node regardless of the presence or
356
# absence of a content change in the file.
358
ie.executable = content_summary[2]
359
lines = tree.get_file(ie.file_id, path).readlines()
361
ie.text_sha1, ie.text_size = self._add_text_to_weave(
362
ie.file_id, lines, heads, nostore_sha)
363
except errors.ExistingContent:
364
# Turns out that the file content was unchanged, and we were
365
# only going to store a new node if it was changed. Carry over
367
ie.revision = parent_entry.revision
368
ie.text_size = parent_entry.text_size
369
ie.text_sha1 = parent_entry.text_sha1
370
ie.executable = parent_entry.executable
371
return self._get_delta(ie, basis_inv, path), False
372
elif kind == 'directory':
374
# all data is meta here, nothing specific to directory, so
376
ie.revision = parent_entry.revision
377
return self._get_delta(ie, basis_inv, path), False
379
self._add_text_to_weave(ie.file_id, lines, heads, None)
380
elif kind == 'symlink':
381
current_link_target = content_summary[3]
383
# symlink target is not generic metadata, check if it has
385
if current_link_target != parent_entry.symlink_target:
388
# unchanged, carry over.
389
ie.revision = parent_entry.revision
390
ie.symlink_target = parent_entry.symlink_target
391
return self._get_delta(ie, basis_inv, path), False
392
ie.symlink_target = current_link_target
394
self._add_text_to_weave(ie.file_id, lines, heads, None)
395
elif kind == 'tree-reference':
397
if content_summary[3] != parent_entry.reference_revision:
400
# unchanged, carry over.
401
ie.reference_revision = parent_entry.reference_revision
402
ie.revision = parent_entry.revision
403
return self._get_delta(ie, basis_inv, path), False
404
ie.reference_revision = content_summary[3]
406
self._add_text_to_weave(ie.file_id, lines, heads, None)
408
raise NotImplementedError('unknown kind')
409
ie.revision = self._new_revision_id
410
return self._get_delta(ie, basis_inv, path), True
412
def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
413
versionedfile = self.repository.weave_store.get_weave_or_empty(
414
file_id, self.repository.get_transaction())
415
# Don't change this to add_lines - add_lines_with_ghosts is cheaper
416
# than add_lines, and allows committing when a parent is ghosted for
418
# Note: as we read the content directly from the tree, we know it's not
419
# been turned into unicode or badly split - but a broken tree
420
# implementation could give us bad output from readlines() so this is
421
# not a guarantee of safety. What would be better is always checking
422
# the content during test suite execution. RBC 20070912
424
return versionedfile.add_lines_with_ghosts(
425
self._new_revision_id, parents, new_lines,
426
nostore_sha=nostore_sha, random_id=self.random_revid,
427
check_content=False)[0:2]
429
versionedfile.clear_cache()
432
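# Illustrative sketch (not part of the original module): one way a caller
# might drive the CommitBuilder protocol described above. The repository is
# assumed to be write-locked already; `branch`, `parents`, `config`,
# `work_tree`, the entry iterable and the unpacking of its items are
# placeholders chosen for the example, and error handling beyond aborting
# the builder is elided.
def _example_commit_with_builder(repository, branch, parents, config,
                                 work_tree, entries, message):
    # get_commit_builder starts a write group for us (see Repository below).
    builder = repository.get_commit_builder(branch, parents, config)
    try:
        for path, ie, parent_invs, content_summary in entries:
            # Record each entry; unchanged entries just carry over their
            # existing revision.
            builder.record_entry_contents(ie, parent_invs, path, work_tree,
                                          content_summary)
        builder.finish_inventory()
        # commit() serialises the revision and commits the write group.
        return builder.commit(message)
    except:
        # On any failure, abandon the partially built commit.
        builder.abort()
        raise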
class RootCommitBuilder(CommitBuilder):
433
"""This commitbuilder actually records the root id"""
435
# the root entry gets versioned properly by this builder.
436
_versioned_root = True
438
def _check_root(self, ie, parent_invs, tree):
439
"""Helper for record_entry_contents.
441
:param ie: An entry being added.
442
:param parent_invs: The inventories of the parent revisions of the
444
:param tree: The tree that is being committed.
448
######################################################################
451
class Repository(object):
452
"""Repository holding history for one or more branches.
454
The repository holds and retrieves historical information including
455
revisions and file history. It's normally accessed only by the Branch,
456
which views a particular line of development through that history.
458
The Repository builds on top of Stores and a Transport, which respectively
459
describe the disk data format and the way of accessing the (possibly
463
# What class to use for a CommitBuilder. Often it's simpler to change this
464
# in a Repository class subclass rather than to override
465
# get_commit_builder.
466
_commit_builder_class = CommitBuilder
467
# The search regex used by xml based repositories to determine what things
468
# were changed in a single commit.
469
_file_ids_altered_regex = lazy_regex.lazy_compile(
470
r'file_id="(?P<file_id>[^"]+)"'
471
r'.* revision="(?P<revision_id>[^"]+)"'
474
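# For example, an inventory entry serialised roughly as
#   <file file_id="foo-20070101-abc" ... revision="rev-1" ... />
# would yield file_id 'foo-20070101-abc' and revision_id 'rev-1' from the
# pattern above (illustrative line only; the exact attribute layout depends
# on the xml serializer in use).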
def abort_write_group(self):
475
"""Commit the contents accrued within the current write group.
477
:seealso: start_write_group.
479
if self._write_group is not self.get_transaction():
480
# has an unlock or relock occurred?
481
raise errors.BzrError('mismatched lock context and write group.')
482
self._abort_write_group()
483
self._write_group = None
485
def _abort_write_group(self):
486
"""Template method for per-repository write group cleanup.
488
This is called during abort before the write group is considered to be
489
finished and should clean up any internal state accrued during the write
group. There is no requirement that data handed to the repository be
*not* made available - this is not a rollback - but neither should any
attempt be made to ensure that data added is fully committed. Abort is
invoked when an error has occurred so further disk or network operations
may not be possible or may error and if possible should not be
499
def add_inventory(self, revision_id, inv, parents):
500
"""Add the inventory inv to the repository as revision_id.
502
:param parents: The revision ids of the parents that revision_id
503
is known to have and are in the repository already.
505
:returns: The validator (which is a sha1 digest, though what is sha'd is
506
repository format specific) of the serialized inventory.
508
assert self.is_in_write_group()
509
_mod_revision.check_not_reserved_id(revision_id)
510
assert inv.revision_id is None or inv.revision_id == revision_id, \
511
"Mismatch between inventory revision" \
512
" id and insertion revid (%r, %r)" % (inv.revision_id, revision_id)
513
assert inv.root is not None
514
inv_lines = self._serialise_inventory_to_lines(inv)
515
inv_vf = self.get_inventory_weave()
516
return self._inventory_add_lines(inv_vf, revision_id, parents,
517
inv_lines, check_content=False)
519
def _inventory_add_lines(self, inv_vf, revision_id, parents, lines,
521
"""Store lines in inv_vf and return the sha1 of the inventory."""
523
for parent in parents:
525
final_parents.append(parent)
526
return inv_vf.add_lines(revision_id, final_parents, lines,
527
check_content=check_content)[0]
530
def add_revision(self, revision_id, rev, inv=None, config=None):
531
"""Add rev to the revision store as revision_id.
533
:param revision_id: the revision id to use.
534
:param rev: The revision object.
535
:param inv: The inventory for the revision. If None, it will be looked
up in the inventory store.
537
:param config: If None no digital signature will be created.
538
If supplied its signature_needed method will be used
539
to determine if a signature should be made.
541
# TODO: jam 20070210 Shouldn't we check rev.revision_id and
543
_mod_revision.check_not_reserved_id(revision_id)
544
if config is not None and config.signature_needed():
546
inv = self.get_inventory(revision_id)
547
plaintext = Testament(rev, inv).as_short_text()
548
self.store_revision_signature(
549
gpg.GPGStrategy(config), plaintext, revision_id)
550
if not revision_id in self.get_inventory_weave():
552
raise errors.WeaveRevisionNotPresent(revision_id,
553
self.get_inventory_weave())
555
# yes, this is not suitable for adding with ghosts.
556
self.add_inventory(revision_id, inv, rev.parent_ids)
557
self._revision_store.add_revision(rev, self.get_transaction())
559
def _add_revision_text(self, revision_id, text):
560
revision = self._revision_store._serializer.read_revision_from_string(
562
self._revision_store._add_revision(revision, StringIO(text),
563
self.get_transaction())
565
def all_revision_ids(self):
566
"""Returns a list of all the revision ids in the repository.
568
This is deprecated because code should generally work on the graph
569
reachable from a particular revision, and ignore any other revisions
570
that might be present. There is no direct replacement method.
572
if 'evil' in debug.debug_flags:
573
mutter_callsite(2, "all_revision_ids is linear with history.")
574
return self._all_revision_ids()
576
def _all_revision_ids(self):
577
"""Returns a list of all the revision ids in the repository.
579
These are in as much topological order as the underlying store can
582
raise NotImplementedError(self._all_revision_ids)
584
def break_lock(self):
585
"""Break a lock if one is present from another instance.
587
Uses the ui factory to ask for confirmation if the lock may be from
590
self.control_files.break_lock()
593
def _eliminate_revisions_not_present(self, revision_ids):
594
"""Check every revision id in revision_ids to see if we have it.
596
Returns a set of the present revisions.
599
for id in revision_ids:
600
if self.has_revision(id):
605
def create(a_bzrdir):
606
"""Construct the current default format repository in a_bzrdir."""
607
return RepositoryFormat.get_default_format().initialize(a_bzrdir)
609
def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
610
"""instantiate a Repository.
612
:param _format: The format of the repository on disk.
613
:param a_bzrdir: The BzrDir of the repository.
615
In the future we will have a single api for all stores for
616
getting file texts, inventories and revisions, then
617
this construct will accept instances of those things.
619
super(Repository, self).__init__()
620
self._format = _format
621
# the following are part of the public API for Repository:
622
self.bzrdir = a_bzrdir
623
self.control_files = control_files
624
self._revision_store = _revision_store
625
# backwards compatibility
626
self.weave_store = text_store
628
self._reconcile_does_inventory_gc = True
629
self._reconcile_fixes_text_parents = False
630
self._reconcile_backsup_inventory = True
631
# not right yet - should be more semantically clear ?
633
self.control_store = control_store
634
self.control_weaves = control_store
635
# TODO: make sure to construct the right store classes, etc, depending
636
# on whether escaping is required.
637
self._warn_if_deprecated()
638
self._write_group = None
639
self.base = control_files._transport.base
642
return '%s(%r)' % (self.__class__.__name__,
645
def has_same_location(self, other):
646
"""Returns a boolean indicating if this repository is at the same
647
location as another repository.
649
This might return False even when two repository objects are accessing
650
the same physical repository via different URLs.
652
if self.__class__ is not other.__class__:
654
return (self.control_files._transport.base ==
655
other.control_files._transport.base)
657
def is_in_write_group(self):
658
"""Return True if there is an open write group.
660
:seealso: start_write_group.
662
return self._write_group is not None
665
return self.control_files.is_locked()
667
def is_write_locked(self):
668
"""Return True if this object is write locked."""
669
return self.is_locked() and self.control_files._lock_mode == 'w'
671
def lock_write(self, token=None):
672
"""Lock this repository for writing.
674
This causes caching within the repository object to start accumulating
675
data during reads, and allows a 'write_group' to be obtained. Write
676
groups must be used for actual data insertion.
678
:param token: if this is already locked, then lock_write will fail
679
unless the token matches the existing lock.
680
:returns: a token if this instance supports tokens, otherwise None.
681
:raises TokenLockingNotSupported: when a token is given but this
682
instance doesn't support using token locks.
683
:raises MismatchedToken: if the specified token doesn't match the token
684
of the existing lock.
685
:seealso: start_write_group.
687
A token should be passed in if you know that you have locked the object
688
some other way, and need to synchronise this object's state with that
691
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
693
result = self.control_files.lock_write(token=token)
698
self.control_files.lock_read()
701
def get_physical_lock_status(self):
702
return self.control_files.get_physical_lock_status()
704
def leave_lock_in_place(self):
705
"""Tell this repository not to release the physical lock when this
708
If lock_write doesn't return a token, then this method is not supported.
710
self.control_files.leave_in_place()
712
def dont_leave_lock_in_place(self):
713
"""Tell this repository to release the physical lock when this
714
object is unlocked, even if it didn't originally acquire it.
716
If lock_write doesn't return a token, then this method is not supported.
718
self.control_files.dont_leave_in_place()
721
def gather_stats(self, revid=None, committers=None):
722
"""Gather statistics from a revision id.
724
:param revid: The revision id to gather statistics from, if None, then
725
no revision specific statistics are gathered.
726
:param committers: Optional parameter controlling whether to grab
727
a count of committers from the revision specific statistics.
728
:return: A dictionary of statistics. Currently this contains:
729
committers: The number of committers if requested.
730
firstrev: A tuple with timestamp, timezone for the penultimate left
731
most ancestor of revid, if revid is not the NULL_REVISION.
732
latestrev: A tuple with timestamp, timezone for revid, if revid is
733
not the NULL_REVISION.
734
revisions: The total revision count in the repository.
735
size: An estimated disk size of the repository in bytes.
738
if revid and committers:
739
result['committers'] = 0
740
if revid and revid != _mod_revision.NULL_REVISION:
742
all_committers = set()
743
revisions = self.get_ancestry(revid)
744
# pop the leading None
746
first_revision = None
748
# ignore the revisions in the middle - just grab first and last
749
revisions = revisions[0], revisions[-1]
750
for revision in self.get_revisions(revisions):
751
if not first_revision:
752
first_revision = revision
754
all_committers.add(revision.committer)
755
last_revision = revision
757
result['committers'] = len(all_committers)
758
result['firstrev'] = (first_revision.timestamp,
759
first_revision.timezone)
760
result['latestrev'] = (last_revision.timestamp,
761
last_revision.timezone)
763
# now gather global repository information
764
if self.bzrdir.root_transport.listable():
765
c, t = self._revision_store.total_size(self.get_transaction())
766
result['revisions'] = c
770
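# Illustrative shape of the dictionary gather_stats can return (values are
# made up for the example; 'committers' only appears when requested, and the
# revision-specific keys only when a revid is supplied):
#   {'committers': 2,
#    'firstrev': (1182000000.0, 0), 'latestrev': (1182500000.0, 0),
#    'revisions': 120, 'size': 1048576}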
def find_branches(self, using=False):
771
"""Find branches underneath this repository.
773
This will include branches inside other branches.
775
:param using: If True, list only branches using this repository.
777
if using and not self.is_shared():
779
return [self.bzrdir.open_branch()]
780
except errors.NotBranchError:
782
class Evaluator(object):
785
self.first_call = True
787
def __call__(self, bzrdir):
788
# On the first call, the parameter is always the bzrdir
789
# containing the current repo.
790
if not self.first_call:
792
repository = bzrdir.open_repository()
793
except errors.NoRepositoryPresent:
796
return False, (None, repository)
797
self.first_call = False
799
value = (bzrdir.open_branch(), None)
800
except errors.NotBranchError:
805
for branch, repository in bzrdir.BzrDir.find_bzrdirs(
806
self.bzrdir.root_transport, evaluate=Evaluator()):
807
if branch is not None:
808
branches.append(branch)
809
if not using and repository is not None:
810
branches.extend(repository.find_branches())
813
def get_data_stream(self, revision_ids):
814
raise NotImplementedError(self.get_data_stream)
816
def insert_data_stream(self, stream):
817
"""XXX What does this really do?
819
Is it a substitute for fetch?
820
Should it manage its own write group ?
822
for item_key, bytes in stream:
823
if item_key[0] == 'file':
824
(file_id,) = item_key[1:]
825
knit = self.weave_store.get_weave_or_empty(
826
file_id, self.get_transaction())
827
elif item_key == ('inventory',):
828
knit = self.get_inventory_weave()
829
elif item_key == ('revisions',):
830
knit = self._revision_store.get_revision_file(
831
self.get_transaction())
832
elif item_key == ('signatures',):
833
knit = self._revision_store.get_signature_file(
834
self.get_transaction())
836
raise RepositoryDataStreamError(
837
"Unrecognised data stream key '%s'" % (item_key,))
838
decoded_list = bencode.bdecode(bytes)
839
format = decoded_list.pop(0)
842
for version, options, parents, some_bytes in decoded_list:
843
data_list.append((version, options, len(some_bytes), parents))
844
knit_bytes += some_bytes
845
buffer = StringIO(knit_bytes)
846
def reader_func(count):
850
return buffer.read(count)
851
knit.insert_data_stream(
852
(format, data_list, reader_func))
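# Illustrative shape of one stream item consumed above (sketch only; names
# made up): the key selects the destination knit and the value is a bencoded
# list of [format, (version, options, parents, bytes), ...]:
#   item_key = ('file', 'file-id-1')
#   bytes = bencode.bencode(['some-format',
#       ('rev-1', ['fulltext'], ['rev-0'], '... raw knit bytes ...')])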
855
def missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
856
"""Return the revision ids that other has that this does not.
858
These are returned in topological order.
860
revision_id: only return revision ids included by revision_id.
862
return InterRepository.get(other, self).missing_revision_ids(
863
revision_id, find_ghosts)
867
"""Open the repository rooted at base.
869
For instance, if the repository is at URL/.bzr/repository,
870
Repository.open(URL) -> a Repository instance.
872
control = bzrdir.BzrDir.open(base)
873
return control.open_repository()
875
def copy_content_into(self, destination, revision_id=None):
876
"""Make a complete copy of the content in self into destination.
878
This is a destructive operation! Do not use it on existing
881
return InterRepository.get(self, destination).copy_content(revision_id)
883
def commit_write_group(self):
884
"""Commit the contents accrued within the current write group.
886
:seealso: start_write_group.
888
if self._write_group is not self.get_transaction():
889
# has an unlock or relock occurred?
890
raise errors.BzrError('mismatched lock context %r and '
892
(self.get_transaction(), self._write_group))
893
self._commit_write_group()
894
self._write_group = None
896
def _commit_write_group(self):
897
"""Template method for per-repository write group cleanup.
899
This is called before the write group is considered to be
900
finished and should ensure that all data handed to the repository
901
for writing during the write group is safely committed (to the
902
extent possible considering file system caching etc).
905
def fetch(self, source, revision_id=None, pb=None, find_ghosts=False):
906
"""Fetch the content required to construct revision_id from source.
908
If revision_id is None all content is copied.
909
:param find_ghosts: Find and copy revisions in the source that are
910
ghosts in the target (and not reachable directly by walking out to
911
the first-present revision in target from revision_id).
913
# fast path same-url fetch operations
914
if self.has_same_location(source):
915
# check that last_revision is in 'from' and then return a
917
if (revision_id is not None and
918
not _mod_revision.is_null(revision_id)):
919
self.get_revision(revision_id)
921
inter = InterRepository.get(source, self)
923
return inter.fetch(revision_id=revision_id, pb=pb, find_ghosts=find_ghosts)
924
except NotImplementedError:
925
raise errors.IncompatibleRepositories(source, self)
927
def create_bundle(self, target, base, fileobj, format=None):
928
return serializer.write_bundle(self, target, base, fileobj, format)
930
def get_commit_builder(self, branch, parents, config, timestamp=None,
931
timezone=None, committer=None, revprops=None,
933
"""Obtain a CommitBuilder for this repository.
935
:param branch: Branch to commit to.
936
:param parents: Revision ids of the parents of the new revision.
937
:param config: Configuration to use.
938
:param timestamp: Optional timestamp recorded for commit.
939
:param timezone: Optional timezone for timestamp.
940
:param committer: Optional committer to set for commit.
941
:param revprops: Optional dictionary of revision properties.
942
:param revision_id: Optional revision id.
944
result = self._commit_builder_class(self, parents, config,
945
timestamp, timezone, committer, revprops, revision_id)
946
self.start_write_group()
950
if (self.control_files._lock_count == 1 and
951
self.control_files._lock_mode == 'w'):
952
if self._write_group is not None:
953
self.abort_write_group()
954
self.control_files.unlock()
955
raise errors.BzrError(
956
'Must end write groups before releasing write locks.')
957
self.control_files.unlock()
960
def clone(self, a_bzrdir, revision_id=None):
961
"""Clone this repository into a_bzrdir using the current format.
963
Currently no check is made that the format of this repository and
964
the bzrdir format are compatible. FIXME RBC 20060201.
966
:return: The newly created destination repository.
968
# TODO: deprecate after 0.16; cloning this with all its settings is
969
# probably not very useful -- mbp 20070423
970
dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
971
self.copy_content_into(dest_repo, revision_id)
974
def start_write_group(self):
975
"""Start a write group in the repository.
977
Write groups are used by repositories which do not have a 1:1 mapping
978
between file ids and backend store to manage the insertion of data from
979
both fetch and commit operations.
981
A write lock is required around the start_write_group/commit_write_group
982
for the support of lock-requiring repository formats.
984
One can only insert data into a repository inside a write group.
988
if not self.is_write_locked():
989
raise errors.NotWriteLocked(self)
990
if self._write_group:
991
raise errors.BzrError('already in a write group')
992
self._start_write_group()
993
# so we can detect unlock/relock - the write group is now entered.
994
self._write_group = self.get_transaction()
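# Illustrative calling sequence for write groups (sketch only; the write lock
# must be held for the whole sequence, and data may only be inserted between
# start and commit/abort):
#   repository.lock_write()
#   try:
#       repository.start_write_group()
#       try:
#           ...add revisions, inventories and texts...
#       except:
#           repository.abort_write_group()
#           raise
#       else:
#           repository.commit_write_group()
#   finally:
#       repository.unlock()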
996
def _start_write_group(self):
997
"""Template method for per-repository write group startup.
999
This is called before the write group is considered to be
1004
def sprout(self, to_bzrdir, revision_id=None):
1005
"""Create a descendent repository for new development.
1007
Unlike clone, this does not copy the settings of the repository.
1009
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
1010
dest_repo.fetch(self, revision_id=revision_id)
1013
def _create_sprouting_repo(self, a_bzrdir, shared):
1014
if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
1015
# use target default format.
1016
dest_repo = a_bzrdir.create_repository()
1018
# Most control formats need the repository to be specifically
1019
# created, but on some old all-in-one formats it's not needed
1021
dest_repo = self._format.initialize(a_bzrdir, shared=shared)
1022
except errors.UninitializableFormat:
1023
dest_repo = a_bzrdir.open_repository()
1027
def has_revision(self, revision_id):
1028
"""True if this repository has a copy of the revision."""
1029
return revision_id in self.has_revisions((revision_id,))
1031
def has_revisions(self, revision_ids):
1032
"""Probe to find out the presence of multiple revisions.
1034
:param revision_ids: An iterable of revision_ids.
1035
:return: A set of the revision_ids that were present.
1037
raise NotImplementedError(self.has_revisions)
1039
return self._revision_store.has_revision_id(revision_id,
1040
self.get_transaction())
1043
def get_revision(self, revision_id):
1044
"""Return the Revision object for a named revision."""
1045
return self.get_revisions([revision_id])[0]
1048
def get_revision_reconcile(self, revision_id):
1049
"""'reconcile' helper routine that allows access to a revision always.
1051
This variant of get_revision does not cross check the weave graph
1052
against the revision one as get_revision does: but it should only
1053
be used by reconcile, or reconcile-alike commands that are correcting
1054
or testing the revision graph.
1056
return self._get_revisions([revision_id])[0]
1059
def get_revisions(self, revision_ids):
1060
"""Get many revisions at once."""
1061
return self._get_revisions(revision_ids)
1064
def _get_revisions(self, revision_ids):
1065
"""Core work logic to get many revisions without sanity checks."""
1066
for rev_id in revision_ids:
1067
if not rev_id or not isinstance(rev_id, basestring):
1068
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
1069
revs = self._revision_store.get_revisions(revision_ids,
1070
self.get_transaction())
1072
assert not isinstance(rev.revision_id, unicode)
1073
for parent_id in rev.parent_ids:
1074
assert not isinstance(parent_id, unicode)
1078
def get_revision_xml(self, revision_id):
1079
# TODO: jam 20070210 This shouldn't be necessary since get_revision
1080
# would have already done it.
1081
# TODO: jam 20070210 Just use _serializer.write_revision_to_string()
1082
rev = self.get_revision(revision_id)
1083
rev_tmp = StringIO()
1084
# the current serializer..
1085
self._revision_store._serializer.write_revision(rev, rev_tmp)
1087
return rev_tmp.getvalue()
1090
def get_deltas_for_revisions(self, revisions):
1091
"""Produce a generator of revision deltas.
1093
Note that the input is a sequence of REVISIONS, not revision_ids.
1094
Trees will be held in memory until the generator exits.
1095
Each delta is relative to the revision's lefthand predecessor.
1097
required_trees = set()
1098
for revision in revisions:
1099
required_trees.add(revision.revision_id)
1100
required_trees.update(revision.parent_ids[:1])
1101
trees = dict((t.get_revision_id(), t) for
1102
t in self.revision_trees(required_trees))
1103
for revision in revisions:
1104
if not revision.parent_ids:
1105
old_tree = self.revision_tree(None)
1107
old_tree = trees[revision.parent_ids[0]]
1108
yield trees[revision.revision_id].changes_from(old_tree)
1111
def get_revision_delta(self, revision_id):
1112
"""Return the delta for one revision.
1114
The delta is relative to the left-hand predecessor of the
1117
r = self.get_revision(revision_id)
1118
return list(self.get_deltas_for_revisions([r]))[0]
1121
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
1122
signature = gpg_strategy.sign(plaintext)
1123
self.add_signature_text(revision_id, signature)
1126
def add_signature_text(self, revision_id, signature):
1127
self._revision_store.add_revision_signature_text(revision_id,
1129
self.get_transaction())
1131
def find_text_key_references(self):
1132
"""Find the text key references within the repository.
1134
1137
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
1138
to whether they were referred to by the inventory of the
1139
revision_id that they contain. The inventory texts from all present
1140
revision ids are assessed to generate this report.
1142
revision_ids = self.all_revision_ids()
1143
w = self.get_inventory_weave()
1144
pb = ui.ui_factory.nested_progress_bar()
1146
return self._find_text_key_references_from_xml_inventory_lines(
1147
w.iter_lines_added_or_present_in_versions(revision_ids, pb=pb))
1151
def _find_text_key_references_from_xml_inventory_lines(self,
1153
"""Core routine for extracting references to texts from inventories.
1155
This performs the translation of xml lines to revision ids.
1157
:param line_iterator: An iterator of lines, origin_version_id
1158
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
1159
to whether they were referred to by the inventory of the
1160
revision_id that they contain. Note that if that revision_id was
1161
not part of the line_iterator's output then False will be given -
1162
even though it may actually refer to that key.
1164
if not self._serializer.support_altered_by_hack:
1165
raise AssertionError(
1166
"_find_text_key_references_from_xml_inventory_lines only "
1167
"supported for branches which store inventory as unnested xml"
1168
", not on %r" % self)
1171
# this code needs to read every new line in every inventory for the
1172
# inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
1173
# not present in one of those inventories is unnecessary but not
1174
# harmful because we are filtering by the revision id marker in the
1175
# inventory lines : we only select file ids altered in one of those
1176
# revisions. We don't need to see all lines in the inventory because
1177
# only those added in an inventory in rev X can contain a revision=X
1179
unescape_revid_cache = {}
1180
unescape_fileid_cache = {}
1182
# jam 20061218 In a big fetch, this handles hundreds of thousands
1183
# of lines, so it has had a lot of inlining and optimizing done.
1184
# Sorry that it is a little bit messy.
1185
# Move several functions to be local variables, since this is a long
1187
search = self._file_ids_altered_regex.search
1188
unescape = _unescape_xml
1189
setdefault = result.setdefault
1190
for line, version_id in line_iterator:
1191
match = search(line)
1194
# One call to match.group() returning multiple items is quite a
1195
# bit faster than 2 calls to match.group() each returning 1
1196
file_id, revision_id = match.group('file_id', 'revision_id')
1198
# Inlining the cache lookups helps a lot when you make 170,000
1199
# lines and 350k ids, versus 8.4 unique ids.
1200
# Using a cache helps in 2 ways:
1201
# 1) Avoids unnecessary decoding calls
1202
# 2) Re-uses cached strings, which helps in future set and
1204
# (2) is enough that removing encoding entirely along with
1205
# the cache (so we are using plain strings) results in no
1206
# performance improvement.
1208
revision_id = unescape_revid_cache[revision_id]
1210
unescaped = unescape(revision_id)
1211
unescape_revid_cache[revision_id] = unescaped
1212
revision_id = unescaped
1214
# Note that unconditionally unescaping means that we deserialise
1215
# every fileid, which for general 'pull' is not great, but we don't
1216
# really want to have so many fulltexts that this matters anyway.
1219
file_id = unescape_fileid_cache[file_id]
1221
unescaped = unescape(file_id)
1222
unescape_fileid_cache[file_id] = unescaped
1225
key = (file_id, revision_id)
1226
setdefault(key, False)
1227
if revision_id == version_id:
1231
def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
1233
"""Helper routine for fileids_altered_by_revision_ids.
1235
This performs the translation of xml lines to revision ids.
1237
:param line_iterator: An iterator of lines, origin_version_id
1238
:param revision_ids: The revision ids to filter for. This should be a
1239
set or other type which supports efficient __contains__ lookups, as
1240
the revision id from each parsed line will be looked up in the
1241
revision_ids filter.
1242
:return: a dictionary mapping altered file-ids to an iterable of
1243
revision_ids. Each altered file-ids has the exact revision_ids that
1244
altered it listed explicitly.
1247
setdefault = result.setdefault
1248
for file_id, revision_id in \
1249
self._find_text_key_references_from_xml_inventory_lines(
1250
line_iterator).iterkeys():
1251
# once data is all ensured-consistent; then this is
1252
# if revision_id == version_id
1253
if revision_id in revision_ids:
1254
setdefault(file_id, set()).add(revision_id)
1257
def fileids_altered_by_revision_ids(self, revision_ids):
1258
"""Find the file ids and versions affected by revisions.
1260
:param revision_ids: an iterable containing revision ids.
1261
:return: a dictionary mapping altered file-ids to an iterable of
1262
revision_ids. Each altered file-ids has the exact revision_ids that
1263
altered it listed explicitly.
1265
selected_revision_ids = set(revision_ids)
1266
w = self.get_inventory_weave()
1267
pb = ui.ui_factory.nested_progress_bar()
1269
return self._find_file_ids_from_xml_inventory_lines(
1270
w.iter_lines_added_or_present_in_versions(
1271
selected_revision_ids, pb=pb),
1272
selected_revision_ids)
1276
def iter_files_bytes(self, desired_files):
1277
"""Iterate through file versions.
1279
Files will not necessarily be returned in the order they occur in
1280
desired_files. No specific order is guaranteed.
1282
Yields pairs of identifier, bytes_iterator. identifier is an opaque
1283
value supplied by the caller as part of desired_files. It should
1284
uniquely identify the file version in the caller's context. (Examples:
1285
an index number or a TreeTransform trans_id.)
1287
bytes_iterator is an iterable of bytestrings for the file. The
1288
kind of iterable and length of the bytestrings are unspecified, but for
1289
this implementation, it is a list of lines produced by
1290
VersionedFile.get_lines().
1292
:param desired_files: a list of (file_id, revision_id, identifier)
1295
transaction = self.get_transaction()
1296
for file_id, revision_id, callable_data in desired_files:
1298
weave = self.weave_store.get_weave(file_id, transaction)
1299
except errors.NoSuchFile:
1300
raise errors.NoSuchIdInRepository(self, file_id)
1301
yield callable_data, weave.get_lines(revision_id)
1303
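# Illustrative use of iter_files_bytes (sketch only; ids are made up). The
# identifiers are opaque to the repository and are simply handed back with
# each file's lines, in whatever order the texts are produced:
#   desired = [('file-id-1', 'rev-1', 'slot-a'), ('file-id-2', 'rev-2', 'slot-b')]
#   for identifier, lines in repo.iter_files_bytes(desired):
#       ...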
def _generate_text_key_index(self, text_key_references=None,
1305
"""Generate a new text key index for the repository.
1307
This is an expensive function that will take considerable time to run.
1309
:return: A dict mapping text keys ((file_id, revision_id) tuples) to a
1310
list of parents, also text keys. When a given key has no parents,
1311
the parents list will be [NULL_REVISION].
1313
# All revisions, to find inventory parents.
1314
if ancestors is None:
1315
revision_graph = self.get_revision_graph_with_ghosts()
1316
ancestors = revision_graph.get_ancestors()
1317
if text_key_references is None:
1318
text_key_references = self.find_text_key_references()
1319
pb = ui.ui_factory.nested_progress_bar()
1321
return self._do_generate_text_key_index(ancestors,
1322
text_key_references, pb)
1326
def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
1327
"""Helper for _generate_text_key_index to avoid deep nesting."""
1328
revision_order = tsort.topo_sort(ancestors)
1329
invalid_keys = set()
1331
for revision_id in revision_order:
1332
revision_keys[revision_id] = set()
1333
text_count = len(text_key_references)
1334
# a cache of the text keys to allow reuse; costs a dict of all the
1335
# keys, but saves a 2-tuple for every child of a given key.
1337
for text_key, valid in text_key_references.iteritems():
1339
invalid_keys.add(text_key)
1341
revision_keys[text_key[1]].add(text_key)
1342
text_key_cache[text_key] = text_key
1343
del text_key_references
1345
text_graph = graph.Graph(graph.DictParentsProvider(text_index))
1346
NULL_REVISION = _mod_revision.NULL_REVISION
1347
# Set a cache with a size of 10 - this suffices for bzr.dev but may be
1348
# too small for large or very branchy trees. However, for 55K path
1349
# trees, it would be easy to use too much memory trivially. Ideally we
1350
# could gauge this by looking at available real memory etc, but this is
1351
# always a tricky proposition.
1352
inventory_cache = lru_cache.LRUCache(10)
1353
batch_size = 10 # should be ~150MB on a 55K path tree
1354
batch_count = len(revision_order) / batch_size + 1
1356
pb.update("Calculating text parents.", processed_texts, text_count)
1357
for offset in xrange(batch_count):
1358
to_query = revision_order[offset * batch_size:(offset + 1) *
1362
for rev_tree in self.revision_trees(to_query):
1363
revision_id = rev_tree.get_revision_id()
1364
parent_ids = ancestors[revision_id]
1365
for text_key in revision_keys[revision_id]:
1366
pb.update("Calculating text parents.", processed_texts)
1367
processed_texts += 1
1368
candidate_parents = []
1369
for parent_id in parent_ids:
1370
parent_text_key = (text_key[0], parent_id)
1372
check_parent = parent_text_key not in \
1373
revision_keys[parent_id]
1375
# the parent parent_id is a ghost:
1376
check_parent = False
1377
# truncate the derived graph against this ghost.
1378
parent_text_key = None
1380
# look at the parent commit details inventories to
1381
# determine possible candidates in the per file graph.
1384
inv = inventory_cache[parent_id]
1386
inv = self.revision_tree(parent_id).inventory
1387
inventory_cache[parent_id] = inv
1388
parent_entry = inv._byid.get(text_key[0], None)
1389
if parent_entry is not None:
1391
text_key[0], parent_entry.revision)
1393
parent_text_key = None
1394
if parent_text_key is not None:
1395
candidate_parents.append(
1396
text_key_cache[parent_text_key])
1397
parent_heads = text_graph.heads(candidate_parents)
1398
new_parents = list(parent_heads)
1399
new_parents.sort(key=lambda x:candidate_parents.index(x))
1400
if new_parents == []:
1401
new_parents = [NULL_REVISION]
1402
text_index[text_key] = new_parents
1404
for text_key in invalid_keys:
1405
text_index[text_key] = [NULL_REVISION]
1408
def item_keys_introduced_by(self, revision_ids, _files_pb=None):
1409
"""Get an iterable listing the keys of all the data introduced by a set
1412
The keys will be ordered so that the corresponding items can be safely
1413
fetched and inserted in that order.
1415
:returns: An iterable producing tuples of (knit-kind, file-id,
1416
versions). knit-kind is one of 'file', 'inventory', 'signatures',
1417
'revisions'. file-id is None unless knit-kind is 'file'.
1419
# XXX: it's a bit weird to control the inventory weave caching in this
1420
# generator. Ideally the caching would be done in fetch.py I think. Or
1421
# maybe this generator should explicitly have the contract that it
1422
# should not be iterated until the previously yielded item has been
1425
inv_w = self.get_inventory_weave()
1426
inv_w.enable_cache()
1428
# file ids that changed
1429
file_ids = self.fileids_altered_by_revision_ids(revision_ids)
1431
num_file_ids = len(file_ids)
1432
for file_id, altered_versions in file_ids.iteritems():
1433
if _files_pb is not None:
1434
_files_pb.update("fetch texts", count, num_file_ids)
1436
yield ("file", file_id, altered_versions)
1437
# We're done with the files_pb. Note that it is finished by the caller,
1438
# just as it was created by the caller.
1442
yield ("inventory", None, revision_ids)
1446
revisions_with_signatures = set()
1447
for rev_id in revision_ids:
1449
self.get_signature_text(rev_id)
1450
except errors.NoSuchRevision:
1454
revisions_with_signatures.add(rev_id)
1456
yield ("signatures", None, revisions_with_signatures)
1459
yield ("revisions", None, revision_ids)
1462
def get_inventory_weave(self):
1463
return self.control_weaves.get_weave('inventory',
1464
self.get_transaction())
1467
def get_inventory(self, revision_id):
1468
"""Get Inventory object by revision id."""
1469
return self.iter_inventories([revision_id]).next()
1471
def iter_inventories(self, revision_ids):
1472
"""Get many inventories by revision_ids.
1474
This will buffer some or all of the texts used in constructing the
1475
inventories in memory, but will only parse a single inventory at a
1478
:return: An iterator of inventories.
1480
assert None not in revision_ids
1481
assert _mod_revision.NULL_REVISION not in revision_ids
1482
return self._iter_inventories(revision_ids)
1484
def _iter_inventories(self, revision_ids):
1485
"""single-document based inventory iteration."""
1486
texts = self.get_inventory_weave().get_texts(revision_ids)
1487
for text, revision_id in zip(texts, revision_ids):
1488
yield self.deserialise_inventory(revision_id, text)
1490
def deserialise_inventory(self, revision_id, xml):
1491
"""Transform the xml into an inventory object.
1493
:param revision_id: The expected revision id of the inventory.
1494
:param xml: A serialised inventory.
1496
result = self._serializer.read_inventory_from_string(xml, revision_id)
1497
if result.revision_id != revision_id:
1498
raise AssertionError('revision id mismatch %s != %s' % (
1499
result.revision_id, revision_id))
1502
def serialise_inventory(self, inv):
1503
return self._serializer.write_inventory_to_string(inv)
1505
def _serialise_inventory_to_lines(self, inv):
1506
return self._serializer.write_inventory_to_lines(inv)
1508
def get_serializer_format(self):
1509
return self._serializer.format_num
1512
def get_inventory_xml(self, revision_id):
1513
"""Get inventory XML as a file object."""
1515
assert isinstance(revision_id, str), type(revision_id)
1516
iw = self.get_inventory_weave()
1517
return iw.get_text(revision_id)
1519
raise errors.HistoryMissing(self, 'inventory', revision_id)
1522
def get_inventory_sha1(self, revision_id):
1523
"""Return the sha1 hash of the inventory entry
1525
return self.get_revision(revision_id).inventory_sha1
1528
def get_revision_graph(self, revision_id=None):
1529
"""Return a dictionary containing the revision graph.
1531
NB: This method should not be used as it accesses the entire graph all
1532
at once, which is much more data than most operations should require.
1534
:param revision_id: The revision_id to get a graph from. If None, then
1535
the entire revision graph is returned. This is a deprecated mode of
1536
operation and will be removed in the future.
1537
:return: a dictionary of revision_id->revision_parents_list.
1539
raise NotImplementedError(self.get_revision_graph)
1542
def get_revision_graph_with_ghosts(self, revision_ids=None):
1543
"""Return a graph of the revisions with ghosts marked as applicable.
1545
:param revision_ids: an iterable of revisions to graph or None for all.
1546
:return: a Graph object with the graph reachable from revision_ids.
1548
if 'evil' in debug.debug_flags:
1550
"get_revision_graph_with_ghosts scales with size of history.")
1551
result = deprecated_graph.Graph()
1552
if not revision_ids:
1553
pending = set(self.all_revision_ids())
1556
pending = set(revision_ids)
1557
# special case NULL_REVISION
1558
if _mod_revision.NULL_REVISION in pending:
1559
pending.remove(_mod_revision.NULL_REVISION)
1560
required = set(pending)
1563
revision_id = pending.pop()
1565
rev = self.get_revision(revision_id)
1566
except errors.NoSuchRevision:
1567
if revision_id in required:
1570
result.add_ghost(revision_id)
1572
for parent_id in rev.parent_ids:
1573
# is this queued or done ?
1574
if (parent_id not in pending and
1575
parent_id not in done):
1577
pending.add(parent_id)
1578
result.add_node(revision_id, rev.parent_ids)
1579
done.add(revision_id)
1582
def _get_history_vf(self):
1583
"""Get a versionedfile whose history graph reflects all revisions.
1585
For weave repositories, this is the inventory weave.
1587
return self.get_inventory_weave()
1589
def iter_reverse_revision_history(self, revision_id):
1590
"""Iterate backwards through revision ids in the lefthand history
1592
:param revision_id: The revision id to start with. All its lefthand
1593
ancestors will be traversed.
1595
if revision_id in (None, _mod_revision.NULL_REVISION):
1597
next_id = revision_id
1598
versionedfile = self._get_history_vf()
1601
parents = versionedfile.get_parents(next_id)
1602
if len(parents) == 0:
1605
next_id = parents[0]
1608
def get_revision_inventory(self, revision_id):
1609
"""Return inventory of a past revision."""
1610
# TODO: Unify this with get_inventory()
1611
# bzr 0.0.6 and later imposes the constraint that the inventory_id
1612
# must be the same as its revision, so this is trivial.
1613
if revision_id is None:
1614
# This does not make sense: if there is no revision,
1615
# then it is the current tree inventory surely ?!
1616
# and thus get_root_id() is something that looks at the last
1617
# commit on the branch, and the get_root_id is an inventory check.
1618
raise NotImplementedError
1619
# return Inventory(self.get_root_id())
1621
return self.get_inventory(revision_id)
1624
def is_shared(self):
1625
"""Return True if this repository is flagged as a shared repository."""
1626
raise NotImplementedError(self.is_shared)
1629
def reconcile(self, other=None, thorough=False):
1630
"""Reconcile this repository."""
1631
from bzrlib.reconcile import RepoReconciler
1632
reconciler = RepoReconciler(self, thorough=thorough)
1633
reconciler.reconcile()
1636
def _refresh_data(self):
1637
"""Helper called from lock_* to ensure coherency with disk.
1639
The default implementation does nothing; it is however possible
1640
for repositories to maintain loaded indices across multiple locks
1641
by checking inside their implementation of this method to see
1642
whether their indices are still valid. This depends of course on
1643
the disk format being validatable in this manner.
1647
def revision_tree(self, revision_id):
1648
"""Return Tree for a revision on this branch.
1650
`revision_id` may be None for the empty tree revision.
1652
# TODO: refactor this to use an existing revision object
1653
# so we don't need to read it in twice.
1654
if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
1655
return RevisionTree(self, Inventory(root_id=None),
1656
_mod_revision.NULL_REVISION)
1658
inv = self.get_revision_inventory(revision_id)
1659
return RevisionTree(self, inv, revision_id)
1662
def revision_trees(self, revision_ids):
1663
"""Return Tree for a revision on this branch.
1665
`revision_id` may not be None or 'null:'"""
1666
inventories = self.iter_inventories(revision_ids)
1667
for inv in inventories:
1668
yield RevisionTree(self, inv, inv.revision_id)
1671
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        w = self.get_inventory_weave()
        candidates = w.get_ancestry(revision_id, topo_sorted)
        return [None] + candidates # self._eliminate_revisions_not_present(candidates)

    def pack(self):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long running call it's reasonable to
        implicitly lock for the user.
        """

    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            # TODO: jam 20060427 Write a test for this code path
            #       it had a bug in it, and was raising the wrong
            #       exception.
            raise errors.BzrError("%r is not present in revision %s" % (file, revision_id))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    def revision_parents(self, revision_id):
        return self.get_inventory_weave().parent_names(revision_id)

    @deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See StackedParentsProvider.get_parents"""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, keys):
        """See graph._StackedParentsProvider.get_parent_map"""
        parent_map = {}
        for revision_id in keys:
            if revision_id == _mod_revision.NULL_REVISION:
                parent_map[revision_id] = ()
            else:
                try:
                    parent_id_list = self.get_revision(revision_id).parent_ids
                except errors.NoSuchRevision:
                    pass
                else:
                    if len(parent_id_list) == 0:
                        parent_ids = (_mod_revision.NULL_REVISION,)
                    else:
                        parent_ids = tuple(parent_id_list)
                    parent_map[revision_id] = parent_ids
        return parent_map

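    # Illustrative note (added commentary, not from the original bzrlib
    # docstrings): a hedged sketch of what get_parent_map() above returns.
    # Assuming a hypothetical repository 'repo' in which 'rev2' has the
    # single parent 'rev1' and 'rev1' has no parents:
    #
    #   repo.get_parent_map(['rev2', 'rev1', 'ghost-id'])
    #   => {'rev2': ('rev1',), 'rev1': ('null:',)}
    #
    # Revisions that are not present ('ghost-id' here) are silently omitted,
    # and a parentless revision is reported with NULL_REVISION as its only
    # parent, mirroring the branches of the loop above.
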
    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            other_repository.bzrdir.transport.base !=
            self.bzrdir.transport.base):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self):
        """Return an object suitable for checking versioned files."""
        return _VersionedFileChecker(self)

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        return self._revision_store.has_signature(revision_id,
                                                  self.get_transaction())

    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        return self._revision_store.get_signature_text(revision_id,
                                                       self.get_transaction())

    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()


def _install_revision(repository, rev, revision_tree, signature):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(None)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    # Add the texts that are not already present
    for path, ie in entries:
        w = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
        if ie.revision not in w:
            text_parents = []
            # FIXME: TODO: The following loop *may* be overlapping/duplicate
            # with InventoryEntry.find_previous_heads(). if it is, then there
            # is a latent bug here where the parents may have ancestors of each
            # other.
            for revision, tree in parent_trees.iteritems():
                if ie.file_id not in tree:
                    continue
                parent_id = tree.inventory[ie.file_id].revision
                if parent_id in text_parents:
                    continue
                text_parents.append(parent_id)

            vfile = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
            lines = revision_tree.get_file(ie.file_id).readlines()
            vfile.add_lines(rev.revision_id, text_parents, lines)
    try:
        # install the inventory
        repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)


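# Illustrative sketch (not part of the original module): one way a caller
# might drive install_revisions() above.  'source' and 'target' are assumed
# to be two compatible, already locked Repository objects and 'revision_ids'
# a list of revision ids present in 'source'; all names are hypothetical.
def _example_copy_revisions(source, target, revision_ids, pb=None):
    """Copy the named revisions from source into target."""
    def triples():
        # Build (revision, tree, signature) tuples in the shape
        # install_revisions() expects; the signature may be None.
        for revision_id in revision_ids:
            rev = source.get_revision(revision_id)
            tree = source.revision_tree(revision_id)
            try:
                signature = source.get_signature_text(revision_id)
            except errors.NoSuchRevision:
                signature = None
            yield rev, tree, signature
    # install_revisions() wraps the whole copy in a single write group on
    # 'target' and aborts that group if anything raises.
    install_revisions(target, triples(), len(revision_ids), pb)

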
class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout."""

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        super(MetaDirRepository, self).__init__(_format,
                                                a_bzrdir,
                                                control_files,
                                                _revision_store,
                                                control_store,
                                                text_store)
        dir_mode = self.control_files._dir_mode
        file_mode = self.control_files._file_mode

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self.control_files._transport.has('shared-storage')

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self.control_files._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self.control_files.put_utf8('no-working-trees', '')

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self.control_files._transport.has('no-working-trees')


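# Illustrative note (added commentary, not from the original source): under
# the metadir layout the two policies above are plain presence flags on the
# repository transport, so, roughly, for a locked MetaDirRepository 'repo':
#
#   repo.set_make_working_trees(False)  # writes an empty 'no-working-trees'
#   repo.make_working_trees()           # False while 'no-working-trees' exists
#   repo.is_shared()                    # True iff 'shared-storage' exists

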
class RepositoryFormatRegistry(registry.Registry):
    """Registry of RepositoryFormats."""

    def get(self, format_string):
        r = registry.Registry.get(self, format_string)
        if callable(r):
            r = r()
        return r


format_registry = RepositoryFormatRegistry()
"""Registry of formats, indexed by their identifying format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide three things:
     * An initialization routine to construct repository data on disk.
     * a format string which is used when the BzrDir supports versioned
       children.
     * an open routine which returns a Repository instance.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a dict by their format string for reference
    during opening. These should be subclasses of RepositoryFormat
    for consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every system load.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string)

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def _get_control_store(self, repo_transport, control_files):
        """Return the control store for this repository."""
        raise NotImplementedError(self._get_control_store)

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def _get_revision_store(self, repo_transport, control_files):
        """Return the revision store object for this a_bzrdir."""
        raise NotImplementedError(self._get_revision_store)

    def _get_text_rev_store(self,
                            transport,
                            control_files,
                            name,
                            compressed=True,
                            prefixed=False,
                            serializer=None):
        """Common logic for getting a revision store for a repository.

        see self._get_revision_store for the subclass-overridable method to
        get the store for a repository.
        """
        from bzrlib.store.revision.text import TextRevisionStore
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        text_store = TextStore(transport.clone(name),
                               prefixed=prefixed,
                               compressed=compressed,
                               dir_mode=dir_mode,
                               file_mode=file_mode)
        _revision_store = TextRevisionStore(text_store, serializer)
        return _revision_store

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in it.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    _matchingbzrdir = bzrdir.BzrDirMetaFormat1()

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        try:
            control_files._transport.mkdir_multi(dirs,
                    mode=control_files._dir_mode)
            for file, content in files:
                control_files.put(file, content)
            for file, content in utf8_files:
                control_files.put_utf8(file, content)
            if shared == True:
                control_files.put_utf8('shared-storage', '')
        finally:
            control_files.unlock()


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92.
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )


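# Illustrative sketch (added commentary, not part of the original module):
# the strings registered above are what RepositoryFormat.find_format() reads
# out of a repository's 'format' file, and register_lazy defers importing the
# implementing class until the first lookup.  For example, assuming the knit
# registration above:
#
#   format = format_registry.get('Bazaar-NG Knit Repository Format 1')
#
# RepositoryFormatRegistry.get() calls the lazily registered factory when the
# registered object is callable, so 'format' ends up being a
# RepositoryFormatKnit1 instance rather than the class itself.

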
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        raise NotImplementedError(self.copy_content)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        Returns the copied revision count and the failed revisions in a tuple:
        (copied, failures).
        """
        raise NotImplementedError(self.fetch)

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        graph = self.source.get_graph()
        missing_revs = set()
        # ensure we don't pay silly lookup costs.
        revision_ids = frozenset(revision_ids)
        searcher = graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        while True:
            try:
                next_revs, ghosts = searcher.next_with_ghosts()
            except StopIteration:
                break
            if revision_ids.intersection(ghosts):
                absent_ids = set(revision_ids.intersection(ghosts))
                # If all absent_ids are present in target, no error is needed.
                absent_ids.difference_update(
                    self.target.has_revisions(absent_ids))
                if absent_ids:
                    raise errors.NoSuchRevision(self.source, absent_ids.pop())
            # we don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            next_revs = set(next_revs)
            next_revs.difference_update(null_set)
            have_revs = self.target.has_revisions(next_revs)
            missing_revs.update(next_revs - have_revs)
            searcher.stop_searching_any(have_revs)
        return missing_revs

    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        # this may look like a no-op: its not. It preserves the ordering
        # other_ids had while only returning the members from other_ids
        # that we've decided we need.
        return [rev_id for rev_id in source_ids if rev_id in result_set]

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        if source._serializer != target._serializer:
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This copies both the repository's revision data, and configuration information
        such as the make_working_trees setting.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        broken style anyway.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # FIXME do not peek!
        if self.source.control_files._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.weave_store.copy_all_ids(
                    self.source.weave_store,
                    pb=pb,
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                pb.update('copying inventory', 0, 1)
                self.target.control_weaves.copy_multi(
                    self.source.control_weaves, ['inventory'],
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                self.target._revision_store.text_store.copy_all_ids(
                    self.source._revision_store.text_store,
                    pb=pb)
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        broken style anyway.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import KnitRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = KnitRepoFetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        broken style anyway.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
        except AttributeError:
            return False
        return are_packs and InterRepository._same_model(source, target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.repofmt.pack_repo import Packer
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        self.count_copied = 0
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            revision_ids = self.source.all_revision_ids()
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts)
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
        packs = self.source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (pack.get_revision_count(), [])
        else:
            return (0, [])

    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        return [r for r in source_ids if (r not in target_ids)]


class InterModel1and2(InterRepository):

    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        else:
            return False

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)


class InterKnit1and2(InterKnitRepo):

    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit3
        try:
            from bzrlib.repofmt.knitrepo import (RepositoryFormatKnit1,
                RepositoryFormatKnit3)
            from bzrlib.repofmt.pack_repo import (RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3)
            return (isinstance(source._format,
                    (RepositoryFormatKnit1, RepositoryFormatKnitPack1)) and
                isinstance(target._format,
                    (RepositoryFormatKnit3, RepositoryFormatKnitPack3))
                )
        except AttributeError:
            return False

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterDifferingSerializer(InterKnitRepo):

    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        # Ideally, we'd support fetching if the source had no tree references
        # even if it supported them...
        if (getattr(source, '_format.supports_tree_reference', False) and
            not getattr(target, '_format.supports_tree_reference', False)):
            return False
        return True

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        revision_ids = self.target.missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts)
        def revisions_iterator():
            for current_revision_id in revision_ids:
                revision = self.source.get_revision(current_revision_id)
                tree = self.source.revision_tree(current_revision_id)
                try:
                    signature = self.source.get_signature_text(
                        current_revision_id)
                except errors.NoSuchRevision:
                    signature = None
                yield revision, tree, signature
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            my_pb = None
        try:
            install_revisions(self.target, revisions_iterator(),
                              len(revision_ids), pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        # Is source's model compatible with target's model?
        source._ensure_real()
        real_source = source._real_repository
        assert not isinstance(real_source, remote.RemoteRepository), (
            "We don't support remote repos backed by remote repos yet.")
        return InterRepository._same_model(real_source, target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import RemoteToOtherFetcher
        mutter("Using fetch logic to copy between %s(remote) and %s(%s)",
               self.source, self.target, self.target._format)
        # TODO: jam 20070210 This should be an assert, not a translate
        revision_id = osutils.safe_revision_id(revision_id)
        f = RemoteToOtherFetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    def _get_repo_format_to_test(self):
        return None


class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterRemoteToOther)
InterRepository.register_optimiser(InterOtherToRemote)


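# Illustrative sketch (added commentary, not from the original source): the
# optimisers registered above are consulted by InterObject.get(), which
# returns the first one whose is_compatible() accepts the (source, target)
# pair, falling back to the generic InterRepository otherwise.  A caller with
# two hypothetical, appropriately locked repository objects might write:
#
#   inter = InterRepository.get(source_repo, target_repo)
#   copied, failures = inter.fetch(revision_id=tip_id, find_ghosts=True)
#
# which is essentially what Repository-level fetch convenience methods
# arrange on the caller's behalf.

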
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)


_unescape_map = {
    'apos': "'",
    'quot': '"',
    'amp': '&',
    'lt': '<',
    'gt': '>',
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)

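
# Illustrative example (added commentary, not from the original source) of
# the helper above, assuming the _unescape_map reconstructed earlier:
#
#   _unescape_xml('&lt;a&gt; &amp; &#65;')
#   => '<a> & A'
#
# The predefined entities come straight from _unescape_map; numeric character
# references are decoded with unichr() and utf8-encoded.
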

class _VersionedFileChecker(object):

    def __init__(self, repository):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index()

    def calculate_file_version_parents(self, revision_id, file_id):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[(file_id, revision_id)]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        # strip the file_id, for the weave api
        return tuple([revision_id for file_id, revision_id in parent_keys])

    def check_file_version_parents(self, weave, file_id):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        unused_versions = set()
        for num, revision_id in enumerate(weave.versions()):
            try:
                correct_parents = self.calculate_file_version_parents(
                    revision_id, file_id)
            except KeyError:
                # The version is not part of the used keys.
                unused_versions.add(revision_id)
            else:
                try:
                    knit_parents = tuple(weave.get_parents(revision_id))
                except errors.RevisionNotPresent:
                    knit_parents = None
                if correct_parents != knit_parents:
                    wrong_parents[revision_id] = (knit_parents, correct_parents)
        return wrong_parents, unused_versions