# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

from cStringIO import StringIO

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
    revision as _mod_revision,
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.store.text import TextStore
from bzrlib.testament import Testament
from bzrlib.util import bencode
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import Inventory, InventoryDirectory, ROOT_ID
from bzrlib.symbol_versioning import (
    deprecated_method,
    )
from bzrlib.trace import mutter, mutter_callsite, note, warning


# Old formats display a warning, but only once
_deprecation_warning_done = False
67
class CommitBuilder(object):
68
"""Provides an interface to build up a commit.
70
This allows describing a tree to be committed without needing to
71
know the internals of the format of the repository.
74
# all clients should supply tree roots.
75
record_root_entry = True
76
# the default CommitBuilder does not manage trees whose root is versioned.
77
_versioned_root = False
79
def __init__(self, repository, parents, config, timestamp=None,
80
timezone=None, committer=None, revprops=None,
82
"""Initiate a CommitBuilder.
84
:param repository: Repository to commit to.
85
:param parents: Revision ids of the parents of the new revision.
86
:param config: Configuration to use.
87
:param timestamp: Optional timestamp recorded for commit.
88
:param timezone: Optional timezone for timestamp.
89
:param committer: Optional committer to set for commit.
90
:param revprops: Optional dictionary of revision properties.
91
:param revision_id: Optional revision id.
96
self._committer = self._config.username()
98
assert isinstance(committer, basestring), type(committer)
99
self._committer = committer
101
self.new_inventory = Inventory(None)
102
self._new_revision_id = revision_id
103
self.parents = parents
104
self.repository = repository
107
if revprops is not None:
108
self._revprops.update(revprops)
110
if timestamp is None:
111
timestamp = time.time()
112
# Restrict resolution to 1ms
113
self._timestamp = round(timestamp, 3)
116
self._timezone = osutils.local_time_offset()
118
self._timezone = int(timezone)
120
self._generate_revision_if_needed()
121
self.__heads = graph.HeadsCache(repository.get_graph()).heads
123
def commit(self, message):
124
"""Make the actual commit.
126
:return: The revision id of the recorded revision.
128
rev = _mod_revision.Revision(
129
timestamp=self._timestamp,
130
timezone=self._timezone,
131
committer=self._committer,
133
inventory_sha1=self.inv_sha1,
134
revision_id=self._new_revision_id,
135
properties=self._revprops)
136
rev.parent_ids = self.parents
137
self.repository.add_revision(self._new_revision_id, rev,
138
self.new_inventory, self._config)
139
self.repository.commit_write_group()
140
return self._new_revision_id
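    # Illustrative usage sketch (not part of the original source; names like
    # `repo`, `branch`, `work_tree` and `entries_to_commit` are assumed to
    # exist in the caller's context). A caller typically drives a
    # CommitBuilder roughly like this:
    #
    #   builder = repo.get_commit_builder(branch, parents, config,
    #                                     committer='Jane Doe <jane@example.com>')
    #   for path, ie, content_summary in entries_to_commit:
    #       builder.record_entry_contents(ie, parent_invs, path, work_tree,
    #                                     content_summary)
    #   builder.finish_inventory()
    #   rev_id = builder.commit('commit message')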
143
"""Abort the commit that is being built.
145
self.repository.abort_write_group()
147
def revision_tree(self):
148
"""Return the tree that was just committed.
150
After calling commit() this can be called to get a RevisionTree
151
representing the newly committed tree. This is preferred to
152
calling Repository.revision_tree() because that may require
153
deserializing the inventory, while we already have a copy in
156
return RevisionTree(self.repository, self.new_inventory,
157
self._new_revision_id)
159
def finish_inventory(self):
160
"""Tell the builder that the inventory is finished."""
161
if self.new_inventory.root is None:
162
raise AssertionError('Root entry should be supplied to'
163
' record_entry_contents, as of bzr 0.10.',
164
DeprecationWarning, stacklevel=2)
165
self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
166
self.new_inventory.revision_id = self._new_revision_id
167
self.inv_sha1 = self.repository.add_inventory(
168
self._new_revision_id,
173
def _gen_revision_id(self):
174
"""Return new revision-id."""
175
return generate_ids.gen_revision_id(self._config.username(),
178
def _generate_revision_if_needed(self):
179
"""Create a revision id if None was supplied.
181
If the repository can not support user-specified revision ids
182
they should override this function and raise CannotSetRevisionId
183
if _new_revision_id is not None.
185
:raises: CannotSetRevisionId
187
if self._new_revision_id is None:
188
self._new_revision_id = self._gen_revision_id()
189
self.random_revid = True
191
self.random_revid = False
193
def _heads(self, file_id, revision_ids):
194
"""Calculate the graph heads for revision_ids in the graph of file_id.
196
This can use either a per-file graph or a global revision graph as we
197
have an identity relationship between the two graphs.
199
return self.__heads(revision_ids)
201
def _check_root(self, ie, parent_invs, tree):
202
"""Helper for record_entry_contents.
204
:param ie: An entry being added.
205
:param parent_invs: The inventories of the parent revisions of the
207
:param tree: The tree that is being committed.
209
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
212
ie.revision = self._new_revision_id
214
def _get_delta(self, ie, basis_inv, path):
215
"""Get a delta against the basis inventory for ie."""
216
if ie.file_id not in basis_inv:
218
return (None, path, ie.file_id, ie)
219
elif ie != basis_inv[ie.file_id]:
221
            # TODO: avoid this id2path call.
222
return (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
227
def record_entry_contents(self, ie, parent_invs, path, tree,
229
"""Record the content of ie from tree into the commit if needed.
231
Side effect: sets ie.revision when unchanged
233
:param ie: An inventory entry present in the commit.
234
:param parent_invs: The inventories of the parent revisions of the
236
:param path: The path the entry is at in the tree.
237
:param tree: The tree which contains this entry and should be used to
239
:param content_summary: Summary data from the tree about the paths
240
content - stat, length, exec, sha/link target. This is only
241
accessed when the entry has a revision of None - that is when it is
242
a candidate to commit.
243
:return: A tuple (change_delta, version_recorded). change_delta is
244
an inventory_delta change for this entry against the basis tree of
245
            the commit, or None if no change occurred against the basis tree.
246
version_recorded is True if a new version of the entry has been
247
recorded. For instance, committing a merge where a file was only
248
changed on the other side will return (delta, False).
250
if self.new_inventory.root is None:
251
if ie.parent_id is not None:
252
raise errors.RootMissing()
253
self._check_root(ie, parent_invs, tree)
254
if ie.revision is None:
255
kind = content_summary[0]
257
# ie is carried over from a prior commit
259
# XXX: repository specific check for nested tree support goes here - if
260
# the repo doesn't want nested trees we skip it ?
261
if (kind == 'tree-reference' and
262
not self.repository._format.supports_tree_reference):
263
# mismatch between commit builder logic and repository:
264
# this needs the entry creation pushed down into the builder.
265
raise NotImplementedError('Missing repository subtree support.')
266
self.new_inventory.add(ie)
268
# TODO: slow, take it out of the inner loop.
270
basis_inv = parent_invs[0]
272
basis_inv = Inventory(root_id=None)
274
# ie.revision is always None if the InventoryEntry is considered
275
# for committing. We may record the previous parents revision if the
276
# content is actually unchanged against a sole head.
277
if ie.revision is not None:
278
if not self._versioned_root and path == '':
279
# repositories that do not version the root set the root's
280
# revision to the new commit even when no change occurs, and
281
# this masks when a change may have occurred against the basis,
282
# so calculate if one happened.
283
if ie.file_id in basis_inv:
284
delta = (basis_inv.id2path(ie.file_id), path,
288
delta = (None, path, ie.file_id, ie)
291
# we don't need to commit this, because the caller already
292
# determined that an existing revision of this file is
294
return None, (ie.revision == self._new_revision_id)
295
# XXX: Friction: parent_candidates should return a list not a dict
296
# so that we don't have to walk the inventories again.
297
parent_candiate_entries = ie.parent_candidates(parent_invs)
298
head_set = self._heads(ie.file_id, parent_candiate_entries.keys())
300
for inv in parent_invs:
301
if ie.file_id in inv:
302
old_rev = inv[ie.file_id].revision
303
if old_rev in head_set:
304
heads.append(inv[ie.file_id].revision)
305
head_set.remove(inv[ie.file_id].revision)
308
# now we check to see if we need to write a new record to the
310
# We write a new entry unless there is one head to the ancestors, and
311
# the kind-derived content is unchanged.
313
        # Cheapest check first: no ancestors, or more than one head in the
314
# ancestors, we write a new node.
318
# There is a single head, look it up for comparison
319
parent_entry = parent_candiate_entries[heads[0]]
320
# if the non-content specific data has changed, we'll be writing a
322
if (parent_entry.parent_id != ie.parent_id or
323
parent_entry.name != ie.name):
325
# now we need to do content specific checks:
327
# if the kind changed the content obviously has
328
if kind != parent_entry.kind:
331
assert content_summary[2] is not None, \
332
"Files must not have executable = None"
334
if (# if the file length changed we have to store:
335
parent_entry.text_size != content_summary[1] or
336
# if the exec bit has changed we have to store:
337
parent_entry.executable != content_summary[2]):
339
elif parent_entry.text_sha1 == content_summary[3]:
340
# all meta and content is unchanged (using a hash cache
341
# hit to check the sha)
342
ie.revision = parent_entry.revision
343
ie.text_size = parent_entry.text_size
344
ie.text_sha1 = parent_entry.text_sha1
345
ie.executable = parent_entry.executable
346
return self._get_delta(ie, basis_inv, path), False
348
                # Either there is only a hash change (no hash cache entry,
349
# or same size content change), or there is no change on
351
                # Provide the parent's hash to the store layer, so that if the
                # content is unchanged we will not store a new node.
353
nostore_sha = parent_entry.text_sha1
355
# We want to record a new node regardless of the presence or
356
# absence of a content change in the file.
358
ie.executable = content_summary[2]
359
lines = tree.get_file(ie.file_id, path).readlines()
361
ie.text_sha1, ie.text_size = self._add_text_to_weave(
362
ie.file_id, lines, heads, nostore_sha)
363
except errors.ExistingContent:
364
# Turns out that the file content was unchanged, and we were
365
# only going to store a new node if it was changed. Carry over
367
ie.revision = parent_entry.revision
368
ie.text_size = parent_entry.text_size
369
ie.text_sha1 = parent_entry.text_sha1
370
ie.executable = parent_entry.executable
371
return self._get_delta(ie, basis_inv, path), False
372
elif kind == 'directory':
374
# all data is meta here, nothing specific to directory, so
376
ie.revision = parent_entry.revision
377
return self._get_delta(ie, basis_inv, path), False
379
self._add_text_to_weave(ie.file_id, lines, heads, None)
380
elif kind == 'symlink':
381
current_link_target = content_summary[3]
383
# symlink target is not generic metadata, check if it has
385
if current_link_target != parent_entry.symlink_target:
388
# unchanged, carry over.
389
ie.revision = parent_entry.revision
390
ie.symlink_target = parent_entry.symlink_target
391
return self._get_delta(ie, basis_inv, path), False
392
ie.symlink_target = current_link_target
394
self._add_text_to_weave(ie.file_id, lines, heads, None)
395
elif kind == 'tree-reference':
397
if content_summary[3] != parent_entry.reference_revision:
400
# unchanged, carry over.
401
ie.reference_revision = parent_entry.reference_revision
402
ie.revision = parent_entry.revision
403
return self._get_delta(ie, basis_inv, path), False
404
ie.reference_revision = content_summary[3]
406
self._add_text_to_weave(ie.file_id, lines, heads, None)
408
raise NotImplementedError('unknown kind')
409
ie.revision = self._new_revision_id
410
return self._get_delta(ie, basis_inv, path), True
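    # Illustrative sketch (not part of the original source): callers usually
    # accumulate the (delta, version_recorded) return values, e.g. to build an
    # inventory delta and count newly stored versions. `builder`, `tree` and
    # `iter_candidates` are assumed names:
    #
    #   deltas = []
    #   new_versions = 0
    #   for path, ie, content_summary in iter_candidates(tree):
    #       delta, recorded = builder.record_entry_contents(
    #           ie, parent_invs, path, tree, content_summary)
    #       if delta is not None:
    #           deltas.append(delta)      # change against the basis inventory
    #       if recorded:
    #           new_versions += 1         # a new version was actually stored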
412
def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
413
versionedfile = self.repository.weave_store.get_weave_or_empty(
414
file_id, self.repository.get_transaction())
415
# Don't change this to add_lines - add_lines_with_ghosts is cheaper
416
# than add_lines, and allows committing when a parent is ghosted for
418
# Note: as we read the content directly from the tree, we know its not
419
# been turned into unicode or badly split - but a broken tree
420
# implementation could give us bad output from readlines() so this is
421
# not a guarantee of safety. What would be better is always checking
422
# the content during test suite execution. RBC 20070912
424
return versionedfile.add_lines_with_ghosts(
425
self._new_revision_id, parents, new_lines,
426
nostore_sha=nostore_sha, random_id=self.random_revid,
427
check_content=False)[0:2]
429
versionedfile.clear_cache()
432
class RootCommitBuilder(CommitBuilder):
433
"""This commitbuilder actually records the root id"""
435
# the root entry gets versioned properly by this builder.
436
_versioned_root = True
438
def _check_root(self, ie, parent_invs, tree):
439
"""Helper for record_entry_contents.
441
:param ie: An entry being added.
442
:param parent_invs: The inventories of the parent revisions of the
444
:param tree: The tree that is being committed.
448
######################################################################
451
class Repository(object):
452
"""Repository holding history for one or more branches.
454
The repository holds and retrieves historical information including
455
revisions and file history. It's normally accessed only by the Branch,
456
which views a particular line of development through that history.
458
The Repository builds on top of Stores and a Transport, which respectively
459
describe the disk data format and the way of accessing the (possibly
463
    # What class to use for a CommitBuilder. Often it's simpler to change this
464
# in a Repository class subclass rather than to override
465
# get_commit_builder.
466
_commit_builder_class = CommitBuilder
467
# The search regex used by xml based repositories to determine what things
468
    # were changed in a single commit.
469
_file_ids_altered_regex = lazy_regex.lazy_compile(
470
r'file_id="(?P<file_id>[^"]+)"'
471
r'.* revision="(?P<revision_id>[^"]+)"'
474
def abort_write_group(self):
475
"""Commit the contents accrued within the current write group.
477
:seealso: start_write_group.
479
if self._write_group is not self.get_transaction():
480
        # has an unlock or relock occurred?
481
raise errors.BzrError('mismatched lock context and write group.')
482
self._abort_write_group()
483
self._write_group = None
485
def _abort_write_group(self):
486
"""Template method for per-repository write group cleanup.
488
This is called during abort before the write group is considered to be
489
        finished and should clean up any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred, so further disk or network operations
        may not be possible or may error and if possible should not be
499
def add_inventory(self, revision_id, inv, parents):
500
"""Add the inventory inv to the repository as revision_id.
502
:param parents: The revision ids of the parents that revision_id
503
is known to have and are in the repository already.
505
:returns: The validator(which is a sha1 digest, though what is sha'd is
506
repository format specific) of the serialized inventory.
508
assert self.is_in_write_group()
509
_mod_revision.check_not_reserved_id(revision_id)
510
assert inv.revision_id is None or inv.revision_id == revision_id, \
511
"Mismatch between inventory revision" \
512
" id and insertion revid (%r, %r)" % (inv.revision_id, revision_id)
513
assert inv.root is not None
514
inv_lines = self._serialise_inventory_to_lines(inv)
515
inv_vf = self.get_inventory_weave()
516
return self._inventory_add_lines(inv_vf, revision_id, parents,
517
inv_lines, check_content=False)
519
def _inventory_add_lines(self, inv_vf, revision_id, parents, lines,
521
"""Store lines in inv_vf and return the sha1 of the inventory."""
523
for parent in parents:
525
final_parents.append(parent)
526
return inv_vf.add_lines(revision_id, final_parents, lines,
527
check_content=check_content)[0]
530
def add_revision(self, revision_id, rev, inv=None, config=None):
531
"""Add rev to the revision store as revision_id.
533
:param revision_id: the revision id to use.
534
:param rev: The revision object.
535
        :param inv: The inventory for the revision. If None, it will be looked
            up in the inventory store.
537
:param config: If None no digital signature will be created.
538
If supplied its signature_needed method will be used
539
to determine if a signature should be made.
541
# TODO: jam 20070210 Shouldn't we check rev.revision_id and
543
_mod_revision.check_not_reserved_id(revision_id)
544
if config is not None and config.signature_needed():
546
inv = self.get_inventory(revision_id)
547
plaintext = Testament(rev, inv).as_short_text()
548
self.store_revision_signature(
549
gpg.GPGStrategy(config), plaintext, revision_id)
550
        if revision_id not in self.get_inventory_weave():
552
raise errors.WeaveRevisionNotPresent(revision_id,
553
self.get_inventory_weave())
555
# yes, this is not suitable for adding with ghosts.
556
self.add_inventory(revision_id, inv, rev.parent_ids)
557
self._revision_store.add_revision(rev, self.get_transaction())
559
def _add_revision_text(self, revision_id, text):
560
revision = self._revision_store._serializer.read_revision_from_string(
562
self._revision_store._add_revision(revision, StringIO(text),
563
self.get_transaction())
565
def all_revision_ids(self):
566
"""Returns a list of all the revision ids in the repository.
568
This is deprecated because code should generally work on the graph
569
reachable from a particular revision, and ignore any other revisions
570
that might be present. There is no direct replacement method.
572
if 'evil' in debug.debug_flags:
573
mutter_callsite(2, "all_revision_ids is linear with history.")
574
return self._all_revision_ids()
576
def _all_revision_ids(self):
577
"""Returns a list of all the revision ids in the repository.
579
These are in as much topological order as the underlying store can
582
raise NotImplementedError(self._all_revision_ids)
584
def break_lock(self):
585
"""Break a lock if one is present from another instance.
587
Uses the ui factory to ask for confirmation if the lock may be from
590
self.control_files.break_lock()
593
def _eliminate_revisions_not_present(self, revision_ids):
594
"""Check every revision id in revision_ids to see if we have it.
596
Returns a set of the present revisions.
599
for id in revision_ids:
600
if self.has_revision(id):
605
def create(a_bzrdir):
606
"""Construct the current default format repository in a_bzrdir."""
607
return RepositoryFormat.get_default_format().initialize(a_bzrdir)
609
def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
610
"""instantiate a Repository.
612
:param _format: The format of the repository on disk.
613
:param a_bzrdir: The BzrDir of the repository.
615
In the future we will have a single api for all stores for
616
getting file texts, inventories and revisions, then
617
this construct will accept instances of those things.
619
super(Repository, self).__init__()
620
self._format = _format
621
# the following are part of the public API for Repository:
622
self.bzrdir = a_bzrdir
623
self.control_files = control_files
624
self._revision_store = _revision_store
625
# backwards compatibility
626
self.weave_store = text_store
628
self._reconcile_does_inventory_gc = True
629
self._reconcile_fixes_text_parents = False
630
self._reconcile_backsup_inventory = True
631
# not right yet - should be more semantically clear ?
633
self.control_store = control_store
634
self.control_weaves = control_store
635
# TODO: make sure to construct the right store classes, etc, depending
636
# on whether escaping is required.
637
self._warn_if_deprecated()
638
self._write_group = None
639
self.base = control_files._transport.base
642
return '%s(%r)' % (self.__class__.__name__,
645
def has_same_location(self, other):
646
"""Returns a boolean indicating if this repository is at the same
647
location as another repository.
649
This might return False even when two repository objects are accessing
650
the same physical repository via different URLs.
652
if self.__class__ is not other.__class__:
654
return (self.control_files._transport.base ==
655
other.control_files._transport.base)
657
def is_in_write_group(self):
658
"""Return True if there is an open write group.
660
:seealso: start_write_group.
662
return self._write_group is not None
665
return self.control_files.is_locked()
667
def is_write_locked(self):
668
"""Return True if this object is write locked."""
669
return self.is_locked() and self.control_files._lock_mode == 'w'
671
def lock_write(self, token=None):
672
"""Lock this repository for writing.
674
        This causes caching within the repository object to start accumulating
675
data during reads, and allows a 'write_group' to be obtained. Write
676
groups must be used for actual data insertion.
678
:param token: if this is already locked, then lock_write will fail
679
unless the token matches the existing lock.
680
:returns: a token if this instance supports tokens, otherwise None.
681
:raises TokenLockingNotSupported: when a token is given but this
682
instance doesn't support using token locks.
683
:raises MismatchedToken: if the specified token doesn't match the token
684
of the existing lock.
685
:seealso: start_write_group.
687
A token should be passed in if you know that you have locked the object
688
some other way, and need to synchronise this object's state with that
691
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
693
result = self.control_files.lock_write(token=token)
698
self.control_files.lock_read()
701
def get_physical_lock_status(self):
702
return self.control_files.get_physical_lock_status()
704
def leave_lock_in_place(self):
705
"""Tell this repository not to release the physical lock when this
708
If lock_write doesn't return a token, then this method is not supported.
710
self.control_files.leave_in_place()
712
def dont_leave_lock_in_place(self):
713
"""Tell this repository to release the physical lock when this
714
object is unlocked, even if it didn't originally acquire it.
716
If lock_write doesn't return a token, then this method is not supported.
718
self.control_files.dont_leave_in_place()
721
def gather_stats(self, revid=None, committers=None):
722
"""Gather statistics from a revision id.
724
:param revid: The revision id to gather statistics from, if None, then
725
no revision specific statistics are gathered.
726
:param committers: Optional parameter controlling whether to grab
727
a count of committers from the revision specific statistics.
728
:return: A dictionary of statistics. Currently this contains:
729
committers: The number of committers if requested.
730
firstrev: A tuple with timestamp, timezone for the penultimate left
731
most ancestor of revid, if revid is not the NULL_REVISION.
732
latestrev: A tuple with timestamp, timezone for revid, if revid is
733
not the NULL_REVISION.
734
revisions: The total revision count in the repository.
735
            size: An estimated disk size of the repository in bytes.
738
if revid and committers:
739
result['committers'] = 0
740
if revid and revid != _mod_revision.NULL_REVISION:
742
all_committers = set()
743
revisions = self.get_ancestry(revid)
744
# pop the leading None
746
first_revision = None
748
# ignore the revisions in the middle - just grab first and last
749
revisions = revisions[0], revisions[-1]
750
for revision in self.get_revisions(revisions):
751
if not first_revision:
752
first_revision = revision
754
all_committers.add(revision.committer)
755
last_revision = revision
757
result['committers'] = len(all_committers)
758
result['firstrev'] = (first_revision.timestamp,
759
first_revision.timezone)
760
result['latestrev'] = (last_revision.timestamp,
761
last_revision.timezone)
763
# now gather global repository information
764
if self.bzrdir.root_transport.listable():
765
c, t = self._revision_store.total_size(self.get_transaction())
766
result['revisions'] = c
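        # Illustrative sketch (not part of the original source): typical use of
        # the statistics dictionary returned by gather_stats(); `repo` and
        # `head_rev_id` are assumed names:
        #
        #   stats = repo.gather_stats(revid=head_rev_id, committers=True)
        #   print 'revisions:', stats.get('revisions')
        #   print 'committers:', stats.get('committers')
        #   first_timestamp, first_timezone = stats['firstrev']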
770
def find_branches(self, using=False):
771
"""Find branches underneath this repository.
773
This will include branches inside other branches.
775
:param using: If True, list only branches using this repository.
777
if using and not self.is_shared():
779
return [self.bzrdir.open_branch()]
780
except errors.NotBranchError:
782
class Evaluator(object):
785
self.first_call = True
787
def __call__(self, bzrdir):
788
# On the first call, the parameter is always the bzrdir
789
# containing the current repo.
790
if not self.first_call:
792
repository = bzrdir.open_repository()
793
except errors.NoRepositoryPresent:
796
return False, (None, repository)
797
self.first_call = False
799
value = (bzrdir.open_branch(), None)
800
except errors.NotBranchError:
805
for branch, repository in bzrdir.BzrDir.find_bzrdirs(
806
self.bzrdir.root_transport, evaluate=Evaluator()):
807
if branch is not None:
808
branches.append(branch)
809
if not using and repository is not None:
810
branches.extend(repository.find_branches())
813
def get_data_stream(self, revision_ids):
814
raise NotImplementedError(self.get_data_stream)
816
def insert_data_stream(self, stream):
817
"""XXX What does this really do?
819
Is it a substitute for fetch?
820
Should it manage its own write group ?
822
for item_key, bytes in stream:
823
if item_key[0] == 'file':
824
(file_id,) = item_key[1:]
825
knit = self.weave_store.get_weave_or_empty(
826
file_id, self.get_transaction())
827
elif item_key == ('inventory',):
828
knit = self.get_inventory_weave()
829
elif item_key == ('revisions',):
830
knit = self._revision_store.get_revision_file(
831
self.get_transaction())
832
elif item_key == ('signatures',):
833
knit = self._revision_store.get_signature_file(
834
self.get_transaction())
836
raise RepositoryDataStreamError(
837
"Unrecognised data stream key '%s'" % (item_key,))
838
decoded_list = bencode.bdecode(bytes)
839
format = decoded_list.pop(0)
842
for version, options, parents, some_bytes in decoded_list:
843
data_list.append((version, options, len(some_bytes), parents))
844
knit_bytes += some_bytes
845
knit.insert_data_stream(
846
(format, data_list, StringIO(knit_bytes).read))
849
def missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
850
"""Return the revision ids that other has that this does not.
852
These are returned in topological order.
854
revision_id: only return revision ids included by revision_id.
856
return InterRepository.get(other, self).missing_revision_ids(
857
revision_id, find_ghosts)
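    # Illustrative sketch (not part of the original source): finding what a
    # second repository has that this one lacks; `repo`, `other_repo` and
    # `tip` are assumed names:
    #
    #   missing = repo.missing_revision_ids(other_repo, revision_id=tip)
    #   for rev_id in missing:    # topological order
    #       pass                  # e.g. fetch or report each missing revision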
861
"""Open the repository rooted at base.
863
For instance, if the repository is at URL/.bzr/repository,
864
Repository.open(URL) -> a Repository instance.
866
control = bzrdir.BzrDir.open(base)
867
return control.open_repository()
869
def copy_content_into(self, destination, revision_id=None):
870
"""Make a complete copy of the content in self into destination.
872
This is a destructive operation! Do not use it on existing
875
return InterRepository.get(self, destination).copy_content(revision_id)
877
def commit_write_group(self):
878
"""Commit the contents accrued within the current write group.
880
:seealso: start_write_group.
882
if self._write_group is not self.get_transaction():
883
            # has an unlock or relock occurred?
884
raise errors.BzrError('mismatched lock context %r and '
886
(self.get_transaction(), self._write_group))
887
self._commit_write_group()
888
self._write_group = None
890
def _commit_write_group(self):
891
"""Template method for per-repository write group cleanup.
893
This is called before the write group is considered to be
894
finished and should ensure that all data handed to the repository
895
for writing during the write group is safely committed (to the
896
extent possible considering file system caching etc).
899
def fetch(self, source, revision_id=None, pb=None, find_ghosts=False):
900
"""Fetch the content required to construct revision_id from source.
902
If revision_id is None all content is copied.
903
:param find_ghosts: Find and copy revisions in the source that are
904
ghosts in the target (and not reachable directly by walking out to
905
the first-present revision in target from revision_id).
907
# fast path same-url fetch operations
908
if self.has_same_location(source):
909
# check that last_revision is in 'from' and then return a
911
if (revision_id is not None and
912
not _mod_revision.is_null(revision_id)):
913
self.get_revision(revision_id)
915
inter = InterRepository.get(source, self)
917
return inter.fetch(revision_id=revision_id, pb=pb, find_ghosts=find_ghosts)
918
except NotImplementedError:
919
raise errors.IncompatibleRepositories(source, self)
921
def create_bundle(self, target, base, fileobj, format=None):
922
return serializer.write_bundle(self, target, base, fileobj, format)
924
def get_commit_builder(self, branch, parents, config, timestamp=None,
925
timezone=None, committer=None, revprops=None,
927
"""Obtain a CommitBuilder for this repository.
929
:param branch: Branch to commit to.
930
:param parents: Revision ids of the parents of the new revision.
931
:param config: Configuration to use.
932
:param timestamp: Optional timestamp recorded for commit.
933
:param timezone: Optional timezone for timestamp.
934
:param committer: Optional committer to set for commit.
935
:param revprops: Optional dictionary of revision properties.
936
:param revision_id: Optional revision id.
938
result = self._commit_builder_class(self, parents, config,
939
timestamp, timezone, committer, revprops, revision_id)
940
self.start_write_group()
944
if (self.control_files._lock_count == 1 and
945
self.control_files._lock_mode == 'w'):
946
if self._write_group is not None:
947
self.abort_write_group()
948
self.control_files.unlock()
949
raise errors.BzrError(
950
'Must end write groups before releasing write locks.')
951
self.control_files.unlock()
954
def clone(self, a_bzrdir, revision_id=None):
955
"""Clone this repository into a_bzrdir using the current format.
957
Currently no check is made that the format of this repository and
958
the bzrdir format are compatible. FIXME RBC 20060201.
960
:return: The newly created destination repository.
962
# TODO: deprecate after 0.16; cloning this with all its settings is
963
# probably not very useful -- mbp 20070423
964
dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
965
self.copy_content_into(dest_repo, revision_id)
968
def start_write_group(self):
969
"""Start a write group in the repository.
971
Write groups are used by repositories which do not have a 1:1 mapping
972
between file ids and backend store to manage the insertion of data from
973
both fetch and commit operations.
975
A write lock is required around the start_write_group/commit_write_group
976
for the support of lock-requiring repository formats.
978
One can only insert data into a repository inside a write group.
982
if not self.is_write_locked():
983
raise errors.NotWriteLocked(self)
984
if self._write_group:
985
raise errors.BzrError('already in a write group')
986
self._start_write_group()
987
# so we can detect unlock/relock - the write group is now entered.
988
self._write_group = self.get_transaction()
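    # Illustrative sketch (not part of the original source): the lock and
    # write-group protocol around data insertion, with `repo` an assumed
    # Repository instance:
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           pass  # insert data: add_inventory(), add_revision(), ...
    #       except:
    #           repo.abort_write_group()
    #           raise
    #       else:
    #           repo.commit_write_group()
    #   finally:
    #       repo.unlock()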
990
def _start_write_group(self):
991
"""Template method for per-repository write group startup.
993
This is called before the write group is considered to be
998
def sprout(self, to_bzrdir, revision_id=None):
999
"""Create a descendent repository for new development.
1001
Unlike clone, this does not copy the settings of the repository.
1003
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
1004
dest_repo.fetch(self, revision_id=revision_id)
1007
def _create_sprouting_repo(self, a_bzrdir, shared):
1008
if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
1009
# use target default format.
1010
dest_repo = a_bzrdir.create_repository()
1012
# Most control formats need the repository to be specifically
1013
# created, but on some old all-in-one formats it's not needed
1015
dest_repo = self._format.initialize(a_bzrdir, shared=shared)
1016
except errors.UninitializableFormat:
1017
dest_repo = a_bzrdir.open_repository()
1021
def has_revision(self, revision_id):
1022
"""True if this repository has a copy of the revision."""
1023
if 'evil' in debug.debug_flags:
1024
mutter_callsite(3, "has_revision is a LBYL symptom.")
1025
return self._revision_store.has_revision_id(revision_id,
1026
self.get_transaction())
1029
def get_revision(self, revision_id):
1030
"""Return the Revision object for a named revision."""
1031
return self.get_revisions([revision_id])[0]
1034
def get_revision_reconcile(self, revision_id):
1035
"""'reconcile' helper routine that allows access to a revision always.
1037
This variant of get_revision does not cross check the weave graph
1038
against the revision one as get_revision does: but it should only
1039
be used by reconcile, or reconcile-alike commands that are correcting
1040
or testing the revision graph.
1042
return self._get_revisions([revision_id])[0]
1045
def get_revisions(self, revision_ids):
1046
"""Get many revisions at once."""
1047
return self._get_revisions(revision_ids)
1050
def _get_revisions(self, revision_ids):
1051
"""Core work logic to get many revisions without sanity checks."""
1052
for rev_id in revision_ids:
1053
if not rev_id or not isinstance(rev_id, basestring):
1054
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
1055
revs = self._revision_store.get_revisions(revision_ids,
1056
self.get_transaction())
1058
assert not isinstance(rev.revision_id, unicode)
1059
for parent_id in rev.parent_ids:
1060
assert not isinstance(parent_id, unicode)
1064
def get_revision_xml(self, revision_id):
1065
# TODO: jam 20070210 This shouldn't be necessary since get_revision
1066
        # would have already done it.
1067
# TODO: jam 20070210 Just use _serializer.write_revision_to_string()
1068
rev = self.get_revision(revision_id)
1069
rev_tmp = StringIO()
1070
# the current serializer..
1071
self._revision_store._serializer.write_revision(rev, rev_tmp)
1073
return rev_tmp.getvalue()
1076
def get_deltas_for_revisions(self, revisions):
1077
"""Produce a generator of revision deltas.
1079
Note that the input is a sequence of REVISIONS, not revision_ids.
1080
Trees will be held in memory until the generator exits.
1081
Each delta is relative to the revision's lefthand predecessor.
1083
required_trees = set()
1084
for revision in revisions:
1085
required_trees.add(revision.revision_id)
1086
required_trees.update(revision.parent_ids[:1])
1087
trees = dict((t.get_revision_id(), t) for
1088
t in self.revision_trees(required_trees))
1089
for revision in revisions:
1090
if not revision.parent_ids:
1091
old_tree = self.revision_tree(None)
1093
old_tree = trees[revision.parent_ids[0]]
1094
yield trees[revision.revision_id].changes_from(old_tree)
1097
def get_revision_delta(self, revision_id):
1098
"""Return the delta for one revision.
1100
The delta is relative to the left-hand predecessor of the
1103
r = self.get_revision(revision_id)
1104
return list(self.get_deltas_for_revisions([r]))[0]
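    # Illustrative sketch (not part of the original source): reporting what a
    # single revision changed relative to its left-hand parent; `repo` and
    # `rev_id` are assumed names, and the delta attributes shown are the usual
    # added/removed lists:
    #
    #   delta = repo.get_revision_delta(rev_id)
    #   for path, file_id, kind in delta.added:
    #       print 'added', path
    #   for path, file_id, kind in delta.removed:
    #       print 'removed', path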
1107
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
1108
signature = gpg_strategy.sign(plaintext)
1109
self.add_signature_text(revision_id, signature)
1112
def add_signature_text(self, revision_id, signature):
1113
self._revision_store.add_revision_signature_text(revision_id,
1115
self.get_transaction())
1117
def find_text_key_references(self):
1118
"""Find the text key references within the repository.
1120
:return: a dictionary mapping (file_id, revision_id) tuples to altered file-ids to an iterable of
1121
revision_ids. Each altered file-ids has the exact revision_ids that
1122
altered it listed explicitly.
1123
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
1124
to whether they were referred to by the inventory of the
1125
revision_id that they contain. The inventory texts from all present
1126
revision ids are assessed to generate this report.
1128
revision_ids = self.all_revision_ids()
1129
w = self.get_inventory_weave()
1130
pb = ui.ui_factory.nested_progress_bar()
1132
return self._find_text_key_references_from_xml_inventory_lines(
1133
w.iter_lines_added_or_present_in_versions(revision_ids, pb=pb))
1137
def _find_text_key_references_from_xml_inventory_lines(self,
1139
"""Core routine for extracting references to texts from inventories.
1141
This performs the translation of xml lines to revision ids.
1143
:param line_iterator: An iterator of lines, origin_version_id
1144
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
1145
to whether they were referred to by the inventory of the
1146
revision_id that they contain. Note that if that revision_id was
1147
not part of the line_iterator's output then False will be given -
1148
even though it may actually refer to that key.
1150
if not self._serializer.support_altered_by_hack:
1151
raise AssertionError(
1152
"_find_text_key_references_from_xml_inventory_lines only "
1153
"supported for branches which store inventory as unnested xml"
1154
", not on %r" % self)
1157
# this code needs to read every new line in every inventory for the
1158
# inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
1159
# not present in one of those inventories is unnecessary but not
1160
# harmful because we are filtering by the revision id marker in the
1161
# inventory lines : we only select file ids altered in one of those
1162
# revisions. We don't need to see all lines in the inventory because
1163
# only those added in an inventory in rev X can contain a revision=X
1165
unescape_revid_cache = {}
1166
unescape_fileid_cache = {}
1168
# jam 20061218 In a big fetch, this handles hundreds of thousands
1169
# of lines, so it has had a lot of inlining and optimizing done.
1170
# Sorry that it is a little bit messy.
1171
# Move several functions to be local variables, since this is a long
1173
search = self._file_ids_altered_regex.search
1174
unescape = _unescape_xml
1175
setdefault = result.setdefault
1176
for line, version_id in line_iterator:
1177
match = search(line)
1180
# One call to match.group() returning multiple items is quite a
1181
# bit faster than 2 calls to match.group() each returning 1
1182
file_id, revision_id = match.group('file_id', 'revision_id')
1184
# Inlining the cache lookups helps a lot when you make 170,000
1185
# lines and 350k ids, versus 8.4 unique ids.
1186
# Using a cache helps in 2 ways:
1187
# 1) Avoids unnecessary decoding calls
1188
# 2) Re-uses cached strings, which helps in future set and
1190
# (2) is enough that removing encoding entirely along with
1191
# the cache (so we are using plain strings) results in no
1192
# performance improvement.
1194
revision_id = unescape_revid_cache[revision_id]
1196
unescaped = unescape(revision_id)
1197
unescape_revid_cache[revision_id] = unescaped
1198
revision_id = unescaped
1200
# Note that unconditionally unescaping means that we deserialise
1201
# every fileid, which for general 'pull' is not great, but we don't
1202
            # really want to have so many fulltexts that this matters anyway.
1205
file_id = unescape_fileid_cache[file_id]
1207
unescaped = unescape(file_id)
1208
unescape_fileid_cache[file_id] = unescaped
1211
key = (file_id, revision_id)
1212
setdefault(key, False)
1213
if revision_id == version_id:
1217
def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
1219
"""Helper routine for fileids_altered_by_revision_ids.
1221
This performs the translation of xml lines to revision ids.
1223
:param line_iterator: An iterator of lines, origin_version_id
1224
:param revision_ids: The revision ids to filter for. This should be a
1225
set or other type which supports efficient __contains__ lookups, as
1226
the revision id from each parsed line will be looked up in the
1227
revision_ids filter.
1228
:return: a dictionary mapping altered file-ids to an iterable of
1229
revision_ids. Each altered file-ids has the exact revision_ids that
1230
altered it listed explicitly.
1233
setdefault = result.setdefault
1234
for file_id, revision_id in \
1235
self._find_text_key_references_from_xml_inventory_lines(
1236
line_iterator).iterkeys():
1237
# once data is all ensured-consistent; then this is
1238
# if revision_id == version_id
1239
if revision_id in revision_ids:
1240
setdefault(file_id, set()).add(revision_id)
1243
def fileids_altered_by_revision_ids(self, revision_ids):
1244
"""Find the file ids and versions affected by revisions.
1246
        :param revision_ids: an iterable containing revision ids.
1247
:return: a dictionary mapping altered file-ids to an iterable of
1248
revision_ids. Each altered file-ids has the exact revision_ids that
1249
altered it listed explicitly.
1251
selected_revision_ids = set(revision_ids)
1252
w = self.get_inventory_weave()
1253
pb = ui.ui_factory.nested_progress_bar()
1255
return self._find_file_ids_from_xml_inventory_lines(
1256
w.iter_lines_added_or_present_in_versions(
1257
selected_revision_ids, pb=pb),
1258
selected_revision_ids)
1262
def iter_files_bytes(self, desired_files):
1263
"""Iterate through file versions.
1265
Files will not necessarily be returned in the order they occur in
1266
desired_files. No specific order is guaranteed.
1268
Yields pairs of identifier, bytes_iterator. identifier is an opaque
1269
value supplied by the caller as part of desired_files. It should
1270
uniquely identify the file version in the caller's context. (Examples:
1271
an index number or a TreeTransform trans_id.)
1273
bytes_iterator is an iterable of bytestrings for the file. The
1274
kind of iterable and length of the bytestrings are unspecified, but for
1275
this implementation, it is a list of lines produced by
1276
VersionedFile.get_lines().
1278
:param desired_files: a list of (file_id, revision_id, identifier)
1281
transaction = self.get_transaction()
1282
for file_id, revision_id, callable_data in desired_files:
1284
weave = self.weave_store.get_weave(file_id, transaction)
1285
except errors.NoSuchFile:
1286
raise errors.NoSuchIdInRepository(self, file_id)
1287
yield callable_data, weave.get_lines(revision_id)
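    # Illustrative sketch (not part of the original source): fetching several
    # file texts in one pass; the third tuple element is a caller-chosen
    # identifier (here an output path), and the ids shown are assumed names:
    #
    #   wanted = [('file-id-1', 'rev-id-a', 'out/a.txt'),
    #             ('file-id-2', 'rev-id-b', 'out/b.txt')]
    #   for identifier, bytes_iterator in repo.iter_files_bytes(wanted):
    #       out = open(identifier, 'wb')
    #       for chunk in bytes_iterator:
    #           out.write(chunk)
    #       out.close()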
1289
def _generate_text_key_index(self, text_key_references=None,
1291
"""Generate a new text key index for the repository.
1293
This is an expensive function that will take considerable time to run.
1295
:return: A dict mapping text keys ((file_id, revision_id) tuples) to a
1296
list of parents, also text keys. When a given key has no parents,
1297
the parents list will be [NULL_REVISION].
1299
# All revisions, to find inventory parents.
1300
if ancestors is None:
1301
revision_graph = self.get_revision_graph_with_ghosts()
1302
ancestors = revision_graph.get_ancestors()
1303
if text_key_references is None:
1304
text_key_references = self.find_text_key_references()
1305
pb = ui.ui_factory.nested_progress_bar()
1307
return self._do_generate_text_key_index(ancestors,
1308
text_key_references, pb)
1312
def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
1313
"""Helper for _generate_text_key_index to avoid deep nesting."""
1314
revision_order = tsort.topo_sort(ancestors)
1315
invalid_keys = set()
1317
for revision_id in revision_order:
1318
revision_keys[revision_id] = set()
1319
text_count = len(text_key_references)
1320
# a cache of the text keys to allow reuse; costs a dict of all the
1321
# keys, but saves a 2-tuple for every child of a given key.
1323
for text_key, valid in text_key_references.iteritems():
1325
invalid_keys.add(text_key)
1327
revision_keys[text_key[1]].add(text_key)
1328
text_key_cache[text_key] = text_key
1329
del text_key_references
1331
text_graph = graph.Graph(graph.DictParentsProvider(text_index))
1332
NULL_REVISION = _mod_revision.NULL_REVISION
1333
# Set a cache with a size of 10 - this suffices for bzr.dev but may be
1334
# too small for large or very branchy trees. However, for 55K path
1335
# trees, it would be easy to use too much memory trivially. Ideally we
1336
# could gauge this by looking at available real memory etc, but this is
1337
# always a tricky proposition.
1338
inventory_cache = lru_cache.LRUCache(10)
1339
batch_size = 10 # should be ~150MB on a 55K path tree
1340
batch_count = len(revision_order) / batch_size + 1
1342
pb.update("Calculating text parents.", processed_texts, text_count)
1343
for offset in xrange(batch_count):
1344
to_query = revision_order[offset * batch_size:(offset + 1) *
1348
for rev_tree in self.revision_trees(to_query):
1349
revision_id = rev_tree.get_revision_id()
1350
parent_ids = ancestors[revision_id]
1351
for text_key in revision_keys[revision_id]:
1352
pb.update("Calculating text parents.", processed_texts)
1353
processed_texts += 1
1354
candidate_parents = []
1355
for parent_id in parent_ids:
1356
parent_text_key = (text_key[0], parent_id)
1358
check_parent = parent_text_key not in \
1359
revision_keys[parent_id]
1361
# the parent parent_id is a ghost:
1362
check_parent = False
1363
# truncate the derived graph against this ghost.
1364
parent_text_key = None
1366
# look at the parent commit details inventories to
1367
# determine possible candidates in the per file graph.
1370
inv = inventory_cache[parent_id]
1372
inv = self.revision_tree(parent_id).inventory
1373
inventory_cache[parent_id] = inv
1374
parent_entry = inv._byid.get(text_key[0], None)
1375
if parent_entry is not None:
1377
text_key[0], parent_entry.revision)
1379
parent_text_key = None
1380
if parent_text_key is not None:
1381
candidate_parents.append(
1382
text_key_cache[parent_text_key])
1383
parent_heads = text_graph.heads(candidate_parents)
1384
new_parents = list(parent_heads)
1385
new_parents.sort(key=lambda x:candidate_parents.index(x))
1386
if new_parents == []:
1387
new_parents = [NULL_REVISION]
1388
text_index[text_key] = new_parents
1390
for text_key in invalid_keys:
1391
text_index[text_key] = [NULL_REVISION]
1394
def item_keys_introduced_by(self, revision_ids, _files_pb=None):
1395
"""Get an iterable listing the keys of all the data introduced by a set
1398
The keys will be ordered so that the corresponding items can be safely
1399
fetched and inserted in that order.
1401
:returns: An iterable producing tuples of (knit-kind, file-id,
1402
versions). knit-kind is one of 'file', 'inventory', 'signatures',
1403
'revisions'. file-id is None unless knit-kind is 'file'.
1405
# XXX: it's a bit weird to control the inventory weave caching in this
1406
# generator. Ideally the caching would be done in fetch.py I think. Or
1407
# maybe this generator should explicitly have the contract that it
1408
# should not be iterated until the previously yielded item has been
1411
inv_w = self.get_inventory_weave()
1412
inv_w.enable_cache()
1414
# file ids that changed
1415
file_ids = self.fileids_altered_by_revision_ids(revision_ids)
1417
num_file_ids = len(file_ids)
1418
for file_id, altered_versions in file_ids.iteritems():
1419
if _files_pb is not None:
1420
_files_pb.update("fetch texts", count, num_file_ids)
1422
yield ("file", file_id, altered_versions)
1423
        # We're done with the files_pb. Note that it is finished by the caller,
1424
# just as it was created by the caller.
1428
yield ("inventory", None, revision_ids)
1432
revisions_with_signatures = set()
1433
for rev_id in revision_ids:
1435
self.get_signature_text(rev_id)
1436
except errors.NoSuchRevision:
1440
revisions_with_signatures.add(rev_id)
1442
yield ("signatures", None, revisions_with_signatures)
1445
yield ("revisions", None, revision_ids)
1448
def get_inventory_weave(self):
1449
return self.control_weaves.get_weave('inventory',
1450
self.get_transaction())
1453
def get_inventory(self, revision_id):
1454
"""Get Inventory object by revision id."""
1455
return self.iter_inventories([revision_id]).next()
1457
def iter_inventories(self, revision_ids):
1458
"""Get many inventories by revision_ids.
1460
This will buffer some or all of the texts used in constructing the
1461
inventories in memory, but will only parse a single inventory at a
1464
:return: An iterator of inventories.
1466
assert None not in revision_ids
1467
assert _mod_revision.NULL_REVISION not in revision_ids
1468
return self._iter_inventories(revision_ids)
1470
def _iter_inventories(self, revision_ids):
1471
"""single-document based inventory iteration."""
1472
texts = self.get_inventory_weave().get_texts(revision_ids)
1473
for text, revision_id in zip(texts, revision_ids):
1474
yield self.deserialise_inventory(revision_id, text)
1476
def deserialise_inventory(self, revision_id, xml):
1477
"""Transform the xml into an inventory object.
1479
:param revision_id: The expected revision id of the inventory.
1480
:param xml: A serialised inventory.
1482
result = self._serializer.read_inventory_from_string(xml, revision_id)
1483
if result.revision_id != revision_id:
1484
raise AssertionError('revision id mismatch %s != %s' % (
1485
result.revision_id, revision_id))
1488
def serialise_inventory(self, inv):
1489
return self._serializer.write_inventory_to_string(inv)
1491
def _serialise_inventory_to_lines(self, inv):
1492
return self._serializer.write_inventory_to_lines(inv)
1494
def get_serializer_format(self):
1495
return self._serializer.format_num
1498
def get_inventory_xml(self, revision_id):
1499
"""Get inventory XML as a file object."""
1501
assert isinstance(revision_id, str), type(revision_id)
1502
iw = self.get_inventory_weave()
1503
return iw.get_text(revision_id)
1505
raise errors.HistoryMissing(self, 'inventory', revision_id)
1508
def get_inventory_sha1(self, revision_id):
1509
"""Return the sha1 hash of the inventory entry
1511
return self.get_revision(revision_id).inventory_sha1
1514
def get_revision_graph(self, revision_id=None):
1515
"""Return a dictionary containing the revision graph.
1517
NB: This method should not be used as it accesses the entire graph all
1518
at once, which is much more data than most operations should require.
1520
:param revision_id: The revision_id to get a graph from. If None, then
1521
the entire revision graph is returned. This is a deprecated mode of
1522
operation and will be removed in the future.
1523
:return: a dictionary of revision_id->revision_parents_list.
1525
raise NotImplementedError(self.get_revision_graph)
1528
def get_revision_graph_with_ghosts(self, revision_ids=None):
1529
"""Return a graph of the revisions with ghosts marked as applicable.
1531
:param revision_ids: an iterable of revisions to graph or None for all.
1532
:return: a Graph object with the graph reachable from revision_ids.
1534
if 'evil' in debug.debug_flags:
1536
"get_revision_graph_with_ghosts scales with size of history.")
1537
result = deprecated_graph.Graph()
1538
if not revision_ids:
1539
pending = set(self.all_revision_ids())
1542
pending = set(revision_ids)
1543
# special case NULL_REVISION
1544
if _mod_revision.NULL_REVISION in pending:
1545
pending.remove(_mod_revision.NULL_REVISION)
1546
required = set(pending)
1549
revision_id = pending.pop()
1551
rev = self.get_revision(revision_id)
1552
except errors.NoSuchRevision:
1553
if revision_id in required:
1556
result.add_ghost(revision_id)
1558
for parent_id in rev.parent_ids:
1559
# is this queued or done ?
1560
if (parent_id not in pending and
1561
parent_id not in done):
1563
pending.add(parent_id)
1564
result.add_node(revision_id, rev.parent_ids)
1565
done.add(revision_id)
1568
def _get_history_vf(self):
1569
"""Get a versionedfile whose history graph reflects all revisions.
1571
For weave repositories, this is the inventory weave.
1573
return self.get_inventory_weave()
1575
def iter_reverse_revision_history(self, revision_id):
1576
"""Iterate backwards through revision ids in the lefthand history
1578
:param revision_id: The revision id to start with. All its lefthand
1579
ancestors will be traversed.
1581
if revision_id in (None, _mod_revision.NULL_REVISION):
1583
next_id = revision_id
1584
versionedfile = self._get_history_vf()
1587
parents = versionedfile.get_parents(next_id)
1588
if len(parents) == 0:
1591
next_id = parents[0]
1594
def get_revision_inventory(self, revision_id):
1595
"""Return inventory of a past revision."""
1596
# TODO: Unify this with get_inventory()
1597
# bzr 0.0.6 and later imposes the constraint that the inventory_id
1598
# must be the same as its revision, so this is trivial.
1599
if revision_id is None:
1600
# This does not make sense: if there is no revision,
1601
# then it is the current tree inventory surely ?!
1602
# and thus get_root_id() is something that looks at the last
1603
# commit on the branch, and the get_root_id is an inventory check.
1604
raise NotImplementedError
1605
# return Inventory(self.get_root_id())
1607
return self.get_inventory(revision_id)
1610
def is_shared(self):
1611
"""Return True if this repository is flagged as a shared repository."""
1612
raise NotImplementedError(self.is_shared)
1615
def reconcile(self, other=None, thorough=False):
1616
"""Reconcile this repository."""
1617
from bzrlib.reconcile import RepoReconciler
1618
reconciler = RepoReconciler(self, thorough=thorough)
1619
reconciler.reconcile()
1622
def _refresh_data(self):
1623
"""Helper called from lock_* to ensure coherency with disk.
1625
The default implementation does nothing; it is however possible
1626
for repositories to maintain loaded indices across multiple locks
1627
by checking inside their implementation of this method to see
1628
whether their indices are still valid. This depends of course on
1629
the disk format being validatable in this manner.
1633
def revision_tree(self, revision_id):
1634
"""Return Tree for a revision on this branch.
1636
`revision_id` may be None for the empty tree revision.
1638
# TODO: refactor this to use an existing revision object
1639
# so we don't need to read it in twice.
1640
if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
1641
return RevisionTree(self, Inventory(root_id=None),
1642
_mod_revision.NULL_REVISION)
1644
inv = self.get_revision_inventory(revision_id)
1645
return RevisionTree(self, inv, revision_id)
1648
def revision_trees(self, revision_ids):
1649
"""Return Tree for a revision on this branch.
1651
`revision_id` may not be None or 'null:'"""
1652
inventories = self.iter_inventories(revision_ids)
1653
for inv in inventories:
1654
yield RevisionTree(self, inv, inv.revision_id)
1657
def get_ancestry(self, revision_id, topo_sorted=True):
1658
"""Return a list of revision-ids integrated by a revision.
1660
The first element of the list is always None, indicating the origin
1661
revision. This might change when we have history horizons, or
1662
perhaps we should have a new API.
1664
This is topologically sorted.
1666
if _mod_revision.is_null(revision_id):
1668
if not self.has_revision(revision_id):
1669
raise errors.NoSuchRevision(self, revision_id)
1670
w = self.get_inventory_weave()
1671
candidates = w.get_ancestry(revision_id, topo_sorted)
1672
return [None] + candidates # self._eliminate_revisions_not_present(candidates)
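    # Illustrative sketch (not part of the original source): the leading None
    # marks the origin, so callers usually strip it; `repo` and `rev_id` are
    # assumed names:
    #
    #   ancestry = repo.get_ancestry(rev_id)   # e.g. [None, 'rev-1', 'rev-2']
    #   real_revisions = ancestry[1:]          # topologically sorted ids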
1675
"""Compress the data within the repository.
1677
This operation only makes sense for some repository types. For other
1678
types it should be a no-op that just returns.
1680
This stub method does not require a lock, but subclasses should use
1681
@needs_write_lock as this is a long running call its reasonable to
1682
implicitly lock for the user.
1686

    @needs_read_lock
    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            # TODO: jam 20060427 Write a test for this code path
            #       it had a bug in it, and was raising the wrong
            #       exception.
            raise errors.BzrError("%r is not present in revision %s" % (file, revision_id))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    def revision_parents(self, revision_id):
        return self.get_inventory_weave().parent_names(revision_id)

    @deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See StackedParentsProvider.get_parents"""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, keys):
        """See graph._StackedParentsProvider.get_parent_map"""
        parent_map = {}
        for revision_id in keys:
            if revision_id == _mod_revision.NULL_REVISION:
                parent_map[revision_id] = ()
            else:
                try:
                    parent_id_list = self.get_revision(revision_id).parent_ids
                except errors.NoSuchRevision:
                    pass
                else:
                    if len(parent_id_list) == 0:
                        parent_ids = (_mod_revision.NULL_REVISION,)
                    else:
                        parent_ids = tuple(parent_id_list)
                    parent_map[revision_id] = parent_ids
        return parent_map

    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            other_repository.bzrdir.transport.base !=
            self.bzrdir.transport.base):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)
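
    # A hypothetical illustration of the parents-provider/graph API above
    # ('repo', 'other_repo' and the revision ids are placeholder names):
    #
    #   parent_map = repo.get_parent_map([rev_a, rev_b])
    #   # e.g. {rev_a: (parent_of_a,), rev_b: ('null:',)}; unknown ids are omitted
    #   graph = repo.get_graph(other_repository=other_repo)
    #   heads = graph.heads([rev_a, rev_b])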

    def _get_versioned_file_checker(self):
        """Return an object suitable for checking versioned files."""
        return _VersionedFileChecker(self)

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        return self._revision_store.has_signature(revision_id,
                                                  self.get_transaction())

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        return self._revision_store.get_signature_text(revision_id,
                                                       self.get_transaction())

    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result
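
    # Hypothetical usage of the consistency checker above ('repo' and
    # 'branch_tip' are placeholder names):
    #
    #   repo.lock_read()
    #   try:
    #       result = repo.check([branch_tip])
    #       result.report_results(verbose=True)
    #   finally:
    #       repo.unlock()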

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                    % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
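
# A minimal usage sketch for install_revisions ('source_repo', 'target_repo'
# and 'rev_ids' are hypothetical names; both repositories are assumed to be
# locked appropriately by the caller):
#
#   target_repo.lock_write()
#   try:
#       triples = [(source_repo.get_revision(r), source_repo.revision_tree(r), None)
#                  for r in rev_ids]
#       install_revisions(target_repo, triples, num_revisions=len(rev_ids))
#   finally:
#       target_repo.unlock()
#
# install_revisions manages its own write group around the copy.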


def _install_revision(repository, rev, revision_tree, signature):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(None)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    # Add the texts that are not already present
    for path, ie in entries:
        w = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
        if ie.revision not in w:
            text_parents = []
            # FIXME: TODO: The following loop *may* be overlapping/duplicate
            # with InventoryEntry.find_previous_heads(). if it is, then there
            # is a latent bug here where the parents may have ancestors of each
            # other.
            for revision, tree in parent_trees.iteritems():
                if ie.file_id not in tree:
                    continue
                parent_id = tree.inventory[ie.file_id].revision
                if parent_id in text_parents:
                    continue
                text_parents.append(parent_id)

            vfile = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
            lines = revision_tree.get_file(ie.file_id).readlines()
            vfile.add_lines(rev.revision_id, text_parents, lines)
    try:
        # install the inventory
        repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout."""

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        super(MetaDirRepository, self).__init__(_format,
                                                a_bzrdir,
                                                control_files,
                                                _revision_store,
                                                control_store,
                                                text_store)
        dir_mode = self.control_files._dir_mode
        file_mode = self.control_files._file_mode

    @needs_read_lock
    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self.control_files._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self.control_files._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self.control_files.put_utf8('no-working-trees', '')

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self.control_files._transport.has('no-working-trees')


class RepositoryFormatRegistry(registry.Registry):
    """Registry of RepositoryFormats."""

    def get(self, format_string):
        r = registry.Registry.get(self, format_string)
        if callable(r):
            r = r()
        return r


format_registry = RepositoryFormatRegistry()
"""Registry of formats, indexed by their identifying format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide three things:
     * An initialization routine to construct repository data on disk.
     * a format string which is used when the BzrDir supports versioned
       children.
     * an open routine which returns a Repository instance.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a dict by their format string for reference
    during opening. These should be subclasses of RepositoryFormat
    for consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created on every system load.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string)

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format
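
    # Illustrative (hypothetical) use of the class-level helpers above:
    #
    #   default = RepositoryFormat.get_default_format()
    #   print default.get_format_description()
    #   # Formats registered with a format string can be looked up directly:
    #   fmt = format_registry.get('Bazaar-NG Knit Repository Format 1')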

    def _get_control_store(self, repo_transport, control_files):
        """Return the control store for this repository."""
        raise NotImplementedError(self._get_control_store)

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def _get_revision_store(self, repo_transport, control_files):
        """Return the revision store object for this a_bzrdir."""
        raise NotImplementedError(self._get_revision_store)

    def _get_text_rev_store(self,
                            transport,
                            control_files,
                            name,
                            compressed=True,
                            prefixed=False,
                            serializer=None):
        """Common logic for getting a revision store for a repository.

        see self._get_revision_store for the subclass-overridable method to
        get the store for a repository.
        """
        from bzrlib.store.revision.text import TextRevisionStore
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        text_store = TextStore(transport.clone(name),
                               prefixed=prefixed,
                               compressed=compressed,
                               dir_mode=dir_mode,
                               file_mode=file_mode)
        _revision_store = TextRevisionStore(text_store, serializer)
        return _revision_store

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  transport,
                                  control_files,
                                  name,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    _matchingbzrdir = bzrdir.BzrDirMetaFormat1()

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                                     'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        try:
            control_files._transport.mkdir_multi(dirs,
                    mode=control_files._dir_mode)
            for file, content in files:
                control_files.put(file, content)
            for file, content in utf8_files:
                control_files.put_utf8(file, content)
            if shared == True:
                control_files.put_utf8('shared-storage', '')
        finally:
            control_files.unlock()


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92.
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
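
# A hypothetical sketch of registering an additional format lazily; the module
# path and class name below are made up for illustration only:
#
#   format_registry.register_lazy(
#       'Bazaar example repository format (demo)\n',
#       'bzrlib.plugins.example.repofmt',
#       'RepositoryFormatExample',
#       )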


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        raise NotImplementedError(self.copy_content)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        Returns the copied revision count and the failed revisions in a tuple:
        (copied, failures).
        """
        raise NotImplementedError(self.fetch)

    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        """
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        # this may look like a no-op: it's not. It preserves the ordering
        # other_ids had while only returning the members from other_ids
        # that we've decided we need.
        return [rev_id for rev_id in source_ids if rev_id in result_set]
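
    # Hypothetical sketch of driving an InterRepository optimiser directly
    # ('source_repo', 'target_repo' and 'tip_rev_id' are placeholder names):
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   needed = inter.missing_revision_ids(revision_id=tip_rev_id)
    #   inter.fetch(revision_id=tip_rev_id)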

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        if source._serializer != target._serializer:
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This copies both the repository's revision data, and configuration
        information such as the make_working_trees setting.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
                RepositoryFormat5,
                RepositoryFormat6,
                RepositoryFormat7,
                )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # FIXME do not peek!
        if self.source.control_files._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.weave_store.copy_all_ids(
                    self.source.weave_store,
                    pb=pb,
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                pb.update('copying inventory', 0, 1)
                self.target.control_weaves.copy_multi(
                    self.source.control_weaves, ['inventory'],
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                self.target._revision_store.text_store.copy_all_ids(
                    self.source._revision_store.text_store,
                    pb=pb)
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        # - RBC 20060209
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import KnitRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = KnitRepoFetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
        except AttributeError:
            return False
        return are_packs and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.repofmt.pack_repo import Packer
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        self.count_copied = 0
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            revision_ids = self.source.all_revision_ids()
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts)
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
        packs = self.source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (pack.get_revision_count(), [])
        else:
            return (0, [])

    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            graph = self.source.get_graph()
            missing_revs = set()
            searcher = graph._make_breadth_first_searcher([revision_id])
            target_index = \
                self.target._pack_collection.revision_index.combined_index
            null_set = frozenset([_mod_revision.NULL_REVISION])
            while True:
                try:
                    next_revs = set(searcher.next())
                except StopIteration:
                    break
                next_revs.difference_update(null_set)
                target_keys = [(key,) for key in next_revs]
                have_revs = frozenset(node[1][0] for node in
                    target_index.iter_entries(target_keys))
                missing_revs.update(next_revs - have_revs)
                searcher.stop_searching_any(have_revs)
                if next_revs - have_revs == set([revision_id]):
                    # we saw the start rev itself, but no parents from it (or
                    # next_revs would have been updated to e.g. set(). We remove
                    # have_revs because if we found revision_id locally we
                    # stop_searching at the first time around.
                    raise errors.NoSuchRevision(self.source, revision_id)
            return missing_revs
        elif revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        return [r for r in source_ids if (r not in target_ids)]


class InterModel1and2(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        else:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)


class InterKnit1and2(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit3
        try:
            from bzrlib.repofmt.knitrepo import (RepositoryFormatKnit1,
                RepositoryFormatKnit3)
            from bzrlib.repofmt.pack_repo import (RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3)
            return (isinstance(source._format,
                    (RepositoryFormatKnit1, RepositoryFormatKnitPack1)) and
                isinstance(target._format,
                    (RepositoryFormatKnit3, RepositoryFormatKnitPack3))
                )
        except AttributeError:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterDifferingSerializer(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        # Ideally, we'd support fetching if the source had no tree references
        # even if it supported them...
        if (getattr(source, '_format.supports_tree_reference', False) and
            not getattr(target, '_format.supports_tree_reference', False)):
            return False
        return True

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        revision_ids = self.target.missing_revision_ids(self.source,
                                                        revision_id)
        def revisions_iterator():
            for current_revision_id in revision_ids:
                revision = self.source.get_revision(current_revision_id)
                tree = self.source.revision_tree(current_revision_id)
                try:
                    signature = self.source.get_signature_text(
                        current_revision_id)
                except errors.NoSuchRevision:
                    signature = None
                yield revision, tree, signature
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            my_pb = None
        try:
            install_revisions(self.target, revisions_iterator(),
                              len(revision_ids), pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        source._ensure_real()
        real_source = source._real_repository
        # Is source's model compatible with target's model, and are they the
        # same format?  Currently we can only optimise fetching from an
        # identical model & format repo.
        assert not isinstance(real_source, remote.RemoteRepository), (
            "We don't support remote repos backed by remote repos yet.")
        return real_source._format == target._format

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import RemoteToOtherFetcher
        mutter("Using fetch logic to copy between %s(remote) and %s(%s)",
               self.source, self.target, self.target._format)
        # TODO: jam 20070210 This should be an assert, not a translate
        revision_id = osutils.safe_revision_id(revision_id)
        f = RemoteToOtherFetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        self._real_inter.fetch(revision_id=revision_id, pb=pb)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterRemoteToOther)
InterRepository.register_optimiser(InterOtherToRemote)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
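
# Example of what _unescape_xml does with predefined and numeric entities
# (illustrative input only):
#
#   _unescape_xml('a &amp; b &lt; c &#65;')  =>  'a & b < c A'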


class _VersionedFileChecker(object):

    def __init__(self, repository):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index()

    def calculate_file_version_parents(self, revision_id, file_id):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[(file_id, revision_id)]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        # strip the file_id, for the weave api
        return tuple([revision_id for file_id, revision_id in parent_keys])

    def check_file_version_parents(self, weave, file_id):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        unused_versions = set()
        for num, revision_id in enumerate(weave.versions()):
            try:
                correct_parents = self.calculate_file_version_parents(
                    revision_id, file_id)
            except KeyError:
                # The version is not part of the used keys.
                unused_versions.add(revision_id)
            else:
                try:
                    knit_parents = tuple(weave.get_parents(revision_id))
                except errors.RevisionNotPresent:
                    knit_parents = None
                if correct_parents != knit_parents:
                    wrong_parents[revision_id] = (knit_parents, correct_parents)
        return wrong_parents, unused_versions
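
# Hypothetical sketch of how check code might drive _VersionedFileChecker
# ('repo' and 'file_id' are placeholder names; the repository must be able to
# generate a text key index):
#
#   checker = repo._get_versioned_file_checker()
#   weave = repo.weave_store.get_weave(file_id, repo.get_transaction())
#   wrong_parents, unused_versions = checker.check_file_version_parents(
#       weave, file_id)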