# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

from cStringIO import StringIO

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
    revision as _mod_revision,
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.store.text import TextStore
from bzrlib.testament import Testament
from bzrlib.util import bencode
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import Inventory, InventoryDirectory, ROOT_ID
from bzrlib.symbol_versioning import (
    deprecated_method,
    )
from bzrlib.trace import mutter, mutter_callsite, note, warning


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
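
    # Illustrative sketch (not from the original bzrlib source): a
    # CommitBuilder is normally obtained from Repository.get_commit_builder()
    # while write-locked, fed entries via record_entry_contents(), and
    # finalised with finish_inventory() followed by commit(). The tree and
    # parent-inventory names below are hypothetical placeholders:
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   for path, ie in some_tree.inventory.iter_entries():
    #       builder.record_entry_contents(ie, parent_invs, path, some_tree,
    #           some_tree.path_content_summary(path))
    #   builder.finish_inventory()
    #   new_rev_id = builder.commit('commit message')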

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished."""
        if self.new_inventory.root is None:
            raise AssertionError('Root entry should be supplied to'
                ' record_entry_contents, as of bzr 0.10.')
            self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
        self.new_inventory.revision_id = self._new_revision_id
        self.inv_sha1 = self.repository.add_inventory(
            self._new_revision_id,
            self.new_inventory,
            self.parents
            )

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository cannot support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            return (None, path, ie.file_id, ie)
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            return (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
        else:
            # common, unaltered
            return None
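
    # Illustrative note (not from the original bzrlib source): the deltas
    # returned above follow the inventory_delta convention of
    # (old_path, new_path, file_id, inventory_entry) tuples, for example:
    #   (None, 'doc/index.txt', 'index-id', ie)   # newly added entry
    #   ('old.txt', 'new.txt', 'index-id', ie)    # renamed or altered entry
    #   None                                      # unchanged entry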

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the path's
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded). change_delta is
            an inventory_delta change for this entry against the basis tree of
            the commit, or None if no change occurred against the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs, and
                # this masks when a change may have occurred against the basis,
                # so calculate if one happened.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                return delta, False
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate.
                return None, (ie.revision == self._new_revision_id)
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        store = len(heads) != 1
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False
                # Either there is only a hash change (no hash cache entry,
                # or same size content change), or there is no change on
                # this file at all.
                # Provide the parent's hash to the store layer, so that if the
                # content is unchanged we will not store a new node.
                nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            lines = tree.get_file(ie.file_id, path).readlines()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False
            ie.symlink_target = current_link_target
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        return self._get_delta(ie, basis_inv, path), True
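
    # Illustrative note (not from the original bzrlib source): as consumed by
    # the kind-specific branches above, content_summary is a 4-tuple keyed on
    # kind, e.g.
    #   ('file', length, executable, sha1_or_None)
    #   ('symlink', None, None, link_target)
    #   ('tree-reference', None, None, reference_revision)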

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        # Note: as we read the content directly from the tree, we know it
        # hasn't been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this is
        # not a guarantee of safety. What would be better is always checking
        # the content during test suite execution. RBC 20070912
        parent_keys = tuple((file_id, parent) for parent in parents)
        return self.repository.texts.add_lines(
            (file_id, self._new_revision_id), parent_keys, new_lines,
            nostore_sha=nostore_sha, random_id=self.random_revid,
            check_content=False)[0:2]


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """


######################################################################
# Repositories


class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of Stores and a Transport, which respectively
    describe the disk data format and the way of accessing the (possibly
    remote) disk.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    """

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )
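
    # Illustrative note (not from the original bzrlib source): this regex
    # picks the file_id and revision attributes out of serialised inventory
    # lines such as:
    #   <file file_id="hello-id" name="hello.c" revision="rev-123" ... />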

    def abort_write_group(self):
        """Abort the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context and write group.')
        self._abort_write_group()
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should clean up any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.
        :returns: The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        return self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
                    up in the inventory store.
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            rev.inventory_sha1 = self.inventories.get_sha1s([(revision_id,)])[0]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is deprecated because code should generally work on the graph
        reachable from a particular revision, and ignore any other revisions
        that might be present. There is no direct replacement method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        offer.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        self.base = control_files._transport.base

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self.control_files._transport.base ==
                other.control_files._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        result = self.control_files.lock_write(token=token)
        self._refresh_data()
        return result

    def lock_read(self):
        self.control_files.lock_read()
        self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
                last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                                  first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                                   last_revision.timezone)

        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            result['revisions'] = len(self.revisions.keys())
        return result
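
    # Illustrative note (not from the original bzrlib source): a typical
    # result dictionary might look like
    #   {'revisions': 1200,
    #    'committers': 14,
    #    'firstrev': (1187280000.0, 0),
    #    'latestrev': (1210000000.0, 3600)}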

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    @needs_read_lock
    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @deprecated_method(symbol_versioning.one_two)
    @needs_read_lock
    def missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        keys = self.search_missing_revision_ids(
            other, revision_id, find_ghosts).get_keys()
        other.lock_read()
        try:
            parents = other.get_graph().get_parent_map(keys)
        finally:
            other.unlock()
        return tsort.topo_sort(parents)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None all content is copied.
        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        """
        # fast path same-url fetch operations
        if self.has_same_location(source):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        inter = InterRepository.get(source, self)
        try:
            return inter.fetch(revision_id=revision_id, pb=pb, find_ghosts=find_ghosts)
        except NotImplementedError:
            raise errors.IncompatibleRepositories(source, self)

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result
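
    # Illustrative sketch (not from the original bzrlib source): committing
    # via the builder returned above, assuming a write-locked branch and
    # repository:
    #
    #   builder = repo.get_commit_builder(branch, [branch.last_revision()],
    #       branch.get_config())
    #   # ...record entries via builder.record_entry_contents()...
    #   builder.finish_inventory()
    #   rev_id = builder.commit('message')  # also commits the write group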

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
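
    # Illustrative sketch (not from the original bzrlib source): the
    # locking/write-group protocol expected around data insertion:
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           # ...insert data (e.g. add_revision, add_inventory)...
    #       except:
    #           repo.abort_write_group()
    #           raise
    #       else:
    #           repo.commit_write_group()
    #   finally:
    #       repo.unlock()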

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    @needs_read_lock
    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        revs = {}
        for record in stream:
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self, record.key[0])
            text = record.get_bytes_as('fulltext')
            rev = self._serializer.read_revision_from_string(text)
            revs[record.key[0]] = rev
        return [revs[revid] for revid in revision_ids]

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        rev = self.get_revision(revision_id)
        rev_tmp = StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.
        """
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])
        trees = dict((t.get_revision_id(), t) for
                     t in self.revision_trees(required_trees))
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(None)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r]))[0]

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4k unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_ids):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_ids: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision id from each parsed line will be looked up in the
            revision_ids filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        result = {}
        setdefault = result.setdefault
        for key in \
            self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys():
            # once data is all ensured-consistent; then this is
            # if revision_id == version_id
            if key[-1:] in revision_ids:
                setdefault(key[0], set()).add(key[-1])
        return result

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files. No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller as part of desired_files. It should
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        transaction = self.get_transaction()
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('fulltext')
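
    # Illustrative usage sketch (not from the original bzrlib source):
    #
    #   wanted = [('file-id-1', 'rev-1', 'a'), ('file-id-2', 'rev-9', 'b')]
    #   for identifier, bytes_iterator in repo.iter_files_bytes(wanted):
    #       text = ''.join(bytes_iterator)
    #       # 'identifier' is 'a' or 'b', matching the request triples above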

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents.", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents.", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            parent_entry = inv._byid.get(text_key[0], None)
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents

        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions). knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'. file-id is None unless knit-kind is 'file'.
        """
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed.
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if _files_pb is not None:
                _files_pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
        # We're done with the files_pb. Note that it is finished by the caller,
        # just as it was created by the caller.
        del _files_pb

        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        revisions_with_signatures = set()
        for rev_id in revision_ids:
            try:
                self.get_signature_text(rev_id)
            except errors.NoSuchRevision:
                # not signed.
                pass
            else:
                revisions_with_signatures.add(rev_id)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids)

    def _iter_inventories(self, revision_ids):
        """single-document based inventory iteration."""
        for text, revision_id in self._iter_inventory_xmls(revision_ids):
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids):
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        texts = {}
        for record in stream:
            if record.storage_kind != 'absent':
                texts[record.key] = record.get_bytes_as('fulltext')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            yield texts[key], key[-1]

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id])
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry
        """
        return self.get_revision(revision_id).inventory_sha1

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with. All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            yield next_id
            # Note: The following line may raise KeyError in the event of
            # truncated history. We decided not to have a try:except:raise
            # RevisionNotPresent here until we see a use for it, because of the
            # cost in an inner loop that is by its very nature O(history).
            # Robert Collins 20080326
            parents = graph.get_parent_map([next_id])[next_id]
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]

    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be None for the empty tree revision.
        """
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Tree for a revision on this branch.

        `revision_id` may not be None or 'null:'"""
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision. This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        if topo_sorted:
            parent_map = graph.get_parent_map(keys)
            keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)
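
    # Illustrative note (not from the original bzrlib source): for a trunk of
    # three revisions the result looks like
    #   [None, 'rev-1', 'rev-2', 'rev-3']
    # with the leading None marking the origin revision, as described above.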

    def pack(self):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long running call, so it's reasonable to
        implicitly lock for the user.
        """

    @needs_read_lock
    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            # TODO: jam 20060427 Write a test for this code path
            #       it had a bug in it, and was raising the wrong
            #       exception.
            raise errors.BzrError("%r is not present in revision %s" % (file, revision_id))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    @deprecated_method(symbol_versioning.one_five)
    def revision_parents(self, revision_id):
        return self.get_revision(revision_id).parent_ids

    @deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See StackedParentsProvider.get_parents"""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, keys):
        """See graph._StackedParentsProvider.get_parent_map"""
        parent_map = {}
        for revision_id in keys:
            if revision_id == _mod_revision.NULL_REVISION:
                parent_map[revision_id] = ()
            else:
                try:
                    parent_id_list = self.get_revision(revision_id).parent_ids
                except errors.NoSuchRevision:
                    pass
                else:
                    if len(parent_id_list) == 0:
                        parent_ids = (_mod_revision.NULL_REVISION,)
                    else:
                        parent_ids = tuple(parent_id_list)
                    parent_map[revision_id] = parent_ids
        return parent_map
1689
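    # Illustrative sketch, not part of the original module: callers of
    # get_parent_map must treat absent keys as missing revisions, since no
    # exception is raised for them. `repo` and the ids are placeholders.
    #
    #   parent_map = repo.get_parent_map(['rev-2', 'ghost-id'])
    #   for revision_id in ['rev-2', 'ghost-id']:
    #       if revision_id in parent_map:
    #           parents = parent_map[revision_id]  # e.g. ('rev-1',)
    #       else:
    #           pass  # 'ghost-id' is not present in the repository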
    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self):
        """Return an object suitable for checking versioned files."""
        return _VersionedFileChecker(self)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result
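    # Illustrative sketch, not part of the original module: for a result_set
    # {'rev-1', 'rev-2'} where rev-2's parent is rev-1 and rev-1's parent is
    # rev-0, start_keys is {'rev-2'} (no member lists it as a parent) and
    # exclude_keys is {'rev-0'} (a parent outside the set).
    #
    #   result = repo.revision_ids_to_search_result(set(['rev-1', 'rev-2']))
    #   result.get_keys()  # -> the same two revision ids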
    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)
    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')
    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)
# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')
def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
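# Illustrative sketch, not part of the original module: copying one revision
# between two placeholder repositories with install_revision. The write
# group handling in install_revisions above makes the insertion atomic.
#
#   rev = source_repo.get_revision('rev-1')
#   tree = source_repo.revision_tree('rev-1')
#   target_repo.lock_write()
#   try:
#       install_revision(target_repo, rev, tree)
#   finally:
#       target_repo.unlock()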
def _install_revision(repository, rev, revision_tree, signature):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(None)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)
class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        dir_mode = self.control_files._dir_mode
        file_mode = self.control_files._file_mode

    @needs_read_lock
    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self.control_files._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self.control_files._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self.control_files.put_utf8('no-working-trees', '')

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self.control_files._transport.has('no-working-trees')
class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


class RepositoryFormatRegistry(registry.Registry):
    """Registry of RepositoryFormats."""

    def get(self, format_string):
        r = registry.Registry.get(self, format_string)
        if callable(r):
            r = r()
        return r


format_registry = RepositoryFormatRegistry()
"""Registry of formats, indexed by their identifying format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
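# Illustrative sketch, not part of the original module: because get() above
# calls any callable it finds, a format may be registered either as an
# instance or as a class/factory. MyRepositoryFormat is hypothetical.
#
#   format_registry.register('My format string\n', MyRepositoryFormat())
#   format_registry.register('Other format string\n', MyRepositoryFormat)
#   fmt = format_registry.get('Other format string\n')  # always an instance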
#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide three things:
     * An initialization routine to construct repository data on disk.
     * a format string which is used when the BzrDir supports versioned
       children.
     * an open routine which returns a Repository instance.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a dict by their format string for reference
    during opening. These should be subclasses of RepositoryFormat
    for consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every system load.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """
    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None

    def __str__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other
    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format
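    # Illustrative sketch, not part of the original module: the usual
    # open-an-existing-repository sequence; `a_bzrdir` is a placeholder.
    #
    #   format = RepositoryFormat.find_format(a_bzrdir)
    #   repository = format.open(a_bzrdir, _found=True)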
    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)
    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in it.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)
class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False
    _matchingbzrdir = bzrdir.BzrDirMetaFormat1()

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        try:
            control_files._transport.mkdir_multi(dirs,
                    mode=control_files._dir_mode)
            for file, content in files:
                control_files.put(file, content)
            for file, content in utf8_files:
                control_files.put_utf8(file, content)
            if shared == True:
                control_files.put_utf8('shared-storage', '')
        finally:
            control_files.unlock()
# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )

# Development formats.
# development 0 - stub to introduce development versioning scheme.
format_registry.register_lazy(
    "Bazaar development format 0 (needs bzr.dev from before 1.3)\n",
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment0',
    )
format_registry.register_lazy(
    ("Bazaar development format 0 with subtree support "
        "(needs bzr.dev from before 1.3)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment0Subtree',
    )
# 1.3->1.4 go below here
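# Illustrative sketch, not part of the original module: register_lazy stores
# only the module and attribute names, so bzrlib.repofmt.pack_repo is not
# imported until the format string is actually looked up:
#
#   fmt = format_registry.get(
#       'Bazaar pack repository format 1 (needs bzr 0.92)\n')
#   # the get() call resolves the lazy registration and imports pack_repo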
class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        raise NotImplementedError(self.copy_content)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        Returns the copied revision count and the failed revisions in a tuple:
        (copied, failures).
        """
        raise NotImplementedError(self.fetch)
    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        if set(target_graph.get_parent_map(revision_ids)) == revision_ids:
            return graph.SearchResult(revision_ids, set(), 0, set())
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        while True:
            try:
                next_revs, ghosts = searcher.next_with_ghosts()
            except StopIteration:
                break
            if revision_ids.intersection(ghosts):
                absent_ids = set(revision_ids.intersection(ghosts))
                # If all absent_ids are present in target, no error is needed.
                absent_ids.difference_update(
                    set(target_graph.get_parent_map(absent_ids)))
                if absent_ids:
                    raise errors.NoSuchRevision(self.source, absent_ids.pop())
            # we don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            next_revs = set(next_revs)
            # we always have NULL_REVISION present.
            have_revs = set(target_graph.get_parent_map(next_revs)).union(null_set)
            missing_revs.update(next_revs - have_revs)
            searcher.stop_searching_any(have_revs)
        return searcher.get_result()
    @deprecated_method(symbol_versioning.one_two)
    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        """
        return list(self.search_missing_revision_ids(
            revision_id, find_ghosts).get_keys())

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
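    # Illustrative sketch, not part of the original module: a typical
    # pull-style use of the two methods above; `source` and `target` are
    # placeholder repositories.
    #
    #   inter = InterRepository.get(source, target)
    #   search = inter.search_missing_revision_ids(find_ghosts=False)
    #   inter.fetch()  # copy everything that target is missing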
    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        if source._serializer != target._serializer:
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)
    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This copies both the repository's revision data, and configuration information
        such as the make_working_trees setting.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions
class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
                RepositoryFormat5,
                RepositoryFormat6,
                RepositoryFormat7,
                )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False
    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplemented):
            pass
        # FIXME do not peek!
        if self.source.control_files._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)
    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import KnitRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = KnitRepoFetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions
    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
        except AttributeError:
            return False
        return are_packs and InterRepository._same_model(source, target)
    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.repofmt.pack_repo import Packer
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        self.count_copied = 0
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            revision_ids = self.source.all_revision_ids()
            revision_keys = [(revid,) for revid in revision_ids]
            index = self.target._pack_collection.revision_index.combined_index
            present_revision_ids = set(item[1][0] for item in
                index.iter_entries(revision_keys))
            revision_ids = set(revision_ids) - present_revision_ids
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.search_missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts).get_keys()
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
            if len(revision_ids) == 0:
                return (0, [])
        packs = self.source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (pack.get_revision_count(), [])
        else:
            return (0, [])
    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
class InterModel1and2(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        else:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)
class InterKnit1and2(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit3
        try:
            from bzrlib.repofmt.knitrepo import (RepositoryFormatKnit1,
                RepositoryFormatKnit3)
            from bzrlib.repofmt.pack_repo import (
                RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3,
                RepositoryFormatPackDevelopment0,
                RepositoryFormatPackDevelopment0Subtree,
                )
            nosubtrees = (
                RepositoryFormatKnit1,
                RepositoryFormatKnitPack1,
                RepositoryFormatPackDevelopment0,
                )
            subtrees = (
                RepositoryFormatKnit3,
                RepositoryFormatKnitPack3,
                RepositoryFormatPackDevelopment0Subtree,
                )
            return (isinstance(source._format, nosubtrees) and
                isinstance(target._format, subtrees))
        except AttributeError:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions
class InterDifferingSerializer(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        # Ideally, we'd support fetching if the source had no tree references
        # even if it supported them...
        if (getattr(source._format, 'supports_tree_reference', False) and
            not getattr(target._format, 'supports_tree_reference', False)):
            return False
        return True

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        def revisions_iterator():
            for current_revision_id in revision_ids:
                revision = self.source.get_revision(current_revision_id)
                tree = self.source.revision_tree(current_revision_id)
                try:
                    signature = self.source.get_signature_text(
                        current_revision_id)
                except errors.NoSuchRevision:
                    signature = None
                yield revision, tree, signature
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            my_pb = None
        try:
            install_revisions(self.target, revisions_iterator(),
                              len(revision_ids), pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0
class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    @classmethod
    def _get_repo_format_to_test(self):
        return None
InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterOtherToRemote)
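# Illustrative sketch, not part of the original module: InterRepository.get
# (from InterObject) tries the registered optimisers' is_compatible() checks
# and falls back to the plain InterRepository when none match; `repo_a` and
# `repo_b` are placeholders.
#
#   inter = InterRepository.get(repo_a, repo_b)
#   # e.g. an InterPackRepo when both sides are pack repositories, or an
#   # InterSameDataRepository when only model and serializer match.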
class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)
_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
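# Illustrative sketch, not part of the original module, of _unescape_xml on
# the predefined entities and a numeric character reference:
#
#   _unescape_xml('&lt;a&gt; &amp; &quot;b&quot; &#169;')
#   # -> '<a> & "b" \xc2\xa9'  (numeric references are returned UTF-8 encoded)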
class _VersionedFileChecker(object):

    def __init__(self, repository):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index()

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_weaves = len(self.file_ids)
        files_in_revisions = {}
        revisions_of_files = {}
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # The text is missing from the store entirely.
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys
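    # Illustrative sketch, not part of the original module: running the
    # checker over a placeholder repository's text store. A progress bar is
    # passed because the method above updates one while loading.
    #
    #   checker = _VersionedFileChecker(repo)
    #   pb = ui.ui_factory.nested_progress_bar()
    #   try:
    #       wrong_parents, unused = checker.check_file_version_parents(
    #           repo.texts, progress_bar=pb)
    #   finally:
    #       pb.finished()
    #   # wrong_parents maps text keys to (stored, correct) parent tuples.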
def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this; more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph