# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
from cStringIO import StringIO

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import re
import time

from bzrlib import (
    bzrdir,
    check,
    debug,
    deprecated_graph,
    errors,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    lockable_files,
    lockdir,
    lru_cache,
    osutils,
    registry,
    remote,
    revision as _mod_revision,
    symbol_versioning,
    transactions,
    tsort,
    ui,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.store.text import TextStore
from bzrlib.testament import Testament
from bzrlib.util import bencode
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import Inventory, InventoryDirectory, ROOT_ID
from bzrlib.symbol_versioning import (
        deprecated_method,
        )
from bzrlib.trace import mutter, mutter_callsite, note, warning


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            assert isinstance(committer, basestring), type(committer)
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self._heads = graph.HeadsCache(repository.get_graph()).heads
    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
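
    # A minimal sketch of the builder lifecycle, assuming a write-locked
    # repository `repo` and hypothetical `branch`, `config`, `parent_invs`
    # and `tree` objects (none of these names are defined in this module):
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   for path, ie in entries_to_commit:   # supplied by the tree
    #       builder.record_entry_contents(ie, parent_invs, path, tree,
    #           tree.path_content_summary(path))
    #   builder.finish_inventory()
    #   new_revid = builder.commit('commit message')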

    def abort(self):
        """Abort the commit that is being built."""
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished."""
        if self.new_inventory.root is None:
            symbol_versioning.warn('Root entry should be supplied to'
                ' record_entry_contents, as of bzr 0.10.',
                DeprecationWarning, stacklevel=2)
            self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
        self.new_inventory.revision_id = self._new_revision_id
        self.inv_sha1 = self.repository.add_inventory(
            self._new_revision_id,
            self.new_inventory,
            self.parents
            )

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            return (None, path, ie.file_id, ie)
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            return (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
        else:
            # common, unaltered
            return None

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the path's
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded). change_delta is
            an inventory_delta change for this entry against the basis tree of
            the commit, or None if no change occurred against the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs, and
                # this masks when a change may have occurred against the basis,
                # so calculate if one happened.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                return delta, False
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate.
                return None, (ie.revision == self._new_revision_id)
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        if kind == 'file':
            assert content_summary[2] is not None, \
                "Files must not have executable = None"
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            lines = tree.get_file(ie.file_id, path).readlines()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False
            ie.symlink_target = current_link_target
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        return self._get_delta(ie, basis_inv, path), True
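
    # For reference: the inventory delta returned above is a 4-tuple of
    # (old_path, new_path, file_id, new_inventory_entry); old_path is None
    # for an add, and the whole delta is None when the entry is unchanged
    # against the basis (see _get_delta above).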

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        versionedfile = self.repository.weave_store.get_weave_or_empty(
            file_id, self.repository.get_transaction())
        # Don't change this to add_lines - add_lines_with_ghosts is cheaper
        # than add_lines, and allows committing when a parent is ghosted for
        # some reason.
        # Note: as we read the content directly from the tree, we know it's not
        # been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this is
        # not a guarantee of safety. What would be better is always checking
        # the content during test suite execution. RBC 20070912
        try:
            return versionedfile.add_lines_with_ghosts(
                self._new_revision_id, parents, new_lines,
                nostore_sha=nostore_sha, random_id=self.random_revid,
                check_content=False)[0:2]
        finally:
            versionedfile.clear_cache()


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """


######################################################################
# Repositories

class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history.  It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of Stores and a Transport, which respectively
    describe the disk data format and the way of accessing the (possibly
    remote) disk.
    """

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )
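
    # For illustration, a serialized inventory line such as
    #   <file file_id="foo-20070101-abc" name="foo" revision="rev-1"/>
    # would yield file_id 'foo-20070101-abc' and revision_id 'rev-1' from
    # this regex (the exact XML shape varies with the serializer version).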

    def abort_write_group(self):
        """Abort the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context and write group.')
        self._abort_write_group()
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should clean up any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    @needs_write_lock
    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        returns the sha1 of the serialized inventory.
        """
        assert self.is_in_write_group()
        _mod_revision.check_not_reserved_id(revision_id)
        assert inv.revision_id is None or inv.revision_id == revision_id, \
            "Mismatch between inventory revision" \
            " id and insertion revid (%r, %r)" % (inv.revision_id, revision_id)
        assert inv.root is not None
        inv_lines = self._serialise_inventory_to_lines(inv)
        inv_vf = self.get_inventory_weave()
        return self._inventory_add_lines(inv_vf, revision_id, parents,
            inv_lines, check_content=False)

    def _inventory_add_lines(self, inv_vf, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        final_parents = []
        for parent in parents:
            if parent in inv_vf:
                final_parents.append(parent)
        return inv_vf.add_lines(revision_id, final_parents, lines,
            check_content=check_content)[0]

    @needs_write_lock
    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
                    up in the inventory store.
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        if not revision_id in self.get_inventory_weave():
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.get_inventory_weave())
            else:
                # yes, this is not suitable for adding with ghosts.
                self.add_inventory(revision_id, inv, rev.parent_ids)
        self._revision_store.add_revision(rev, self.get_transaction())

    def _add_revision_text(self, revision_id, text):
        revision = self._revision_store._serializer.read_revision_from_string(
            text)
        self._revision_store._add_revision(revision, StringIO(text),
                                           self.get_transaction())

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is deprecated because code should generally work on the graph
        reachable from a particular revision, and ignore any other revisions
        that might be present.  There is no direct replacement method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        present.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        result = []
        for id in revision_ids:
            if self.has_revision(id):
                result.append(id)
        return result

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._revision_store = _revision_store
        # backwards compatibility
        self.weave_store = text_store
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        self.control_store = control_store
        self.control_weaves = control_store
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        self.base = control_files._transport.base

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self.control_files._transport.base ==
                other.control_files._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        result = self.control_files.lock_write(token=token)
        self._refresh_data()
        return result

    def lock_read(self):
        self.control_files.lock_read()
        self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        if self.bzrdir.root_transport.listable():
            c, t = self._revision_store.total_size(self.get_transaction())
            result['revisions'] = c
            result['size'] = t
        return result
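
    # An illustrative result for a repository with history (the values here
    # are invented for the example):
    #
    #   {'committers': 2,
    #    'firstrev': (1193272646.0, 3600),
    #    'latestrev': (1193874999.0, 3600),
    #    'revisions': 120,
    #    'size': 425000}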

    def get_data_stream(self, revision_ids):
        raise NotImplementedError(self.get_data_stream)

    def insert_data_stream(self, stream):
        """XXX What does this really do?

        Is it a substitute for fetch?
        Should it manage its own write group ?
        """
        for item_key, bytes in stream:
            if item_key[0] == 'file':
                (file_id,) = item_key[1:]
                knit = self.weave_store.get_weave_or_empty(
                    file_id, self.get_transaction())
            elif item_key == ('inventory',):
                knit = self.get_inventory_weave()
            elif item_key == ('revisions',):
                knit = self._revision_store.get_revision_file(
                    self.get_transaction())
            elif item_key == ('signatures',):
                knit = self._revision_store.get_signature_file(
                    self.get_transaction())
            else:
                raise errors.RepositoryDataStreamError(
                    "Unrecognised data stream key '%s'" % (item_key,))
            decoded_list = bencode.bdecode(bytes)
            format = decoded_list.pop(0)
            data_list = []
            knit_bytes = ''
            for version, options, parents, some_bytes in decoded_list:
                data_list.append((version, options, len(some_bytes), parents))
                knit_bytes += some_bytes
            knit.insert_data_stream(
                (format, data_list, StringIO(knit_bytes).read))

    @needs_read_lock
    def missing_revision_ids(self, other, revision_id=None):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).missing_revision_ids(revision_id)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None all content is copied.
        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        """
        # fast path same-url fetch operations
        if self.has_same_location(source):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        inter = InterRepository.get(source, self)
        try:
            return inter.fetch(revision_id=revision_id, pb=pb, find_ghosts=find_ghosts)
        except NotImplementedError:
            raise errors.IncompatibleRepositories(source, self)

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
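
    # The expected calling pattern, as a sketch (`repo` is a hypothetical
    # Repository instance; error handling is the caller's responsibility):
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           ... insert data ...
    #           repo.commit_write_group()
    #       except:
    #           repo.abort_write_group()
    #           raise
    #   finally:
    #       repo.unlock()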

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    @needs_read_lock
    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        if 'evil' in debug.debug_flags:
            mutter_callsite(3, "has_revision is a LBYL symptom.")
        return self._revision_store.has_revision_id(revision_id,
                                                    self.get_transaction())

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        revs = self._revision_store.get_revisions(revision_ids,
                                                  self.get_transaction())
        for rev in revs:
            assert not isinstance(rev.revision_id, unicode)
            for parent_id in rev.parent_ids:
                assert not isinstance(parent_id, unicode)
        return revs

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        rev = self.get_revision(revision_id)
        rev_tmp = StringIO()
        # the current serializer..
        self._revision_store._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.
        """
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])
        trees = dict((t.get_revision_id(), t) for
                     t in self.revision_trees(required_trees))
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(None)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r]))[0]

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self._revision_store.add_revision_signature_text(revision_id,
                                                         signature,
                                                         self.get_transaction())

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_ids = self.all_revision_ids()
        w = self.get_inventory_weave()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_versions(revision_ids, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        assert self._serializer.support_altered_by_hack, \
            ("_find_text_key_references_from_xml_inventory_lines only "
             "supported for branches which store inventory as unnested xml, "
             "not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, version_id in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unescaping always means that on a fulltext cached
            # inventory we deserialised every fileid, which for general 'pull'
            # is not great, but we don't really want to have so many
            # fulltexts that this matters anyway. RBC 20071114.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == version_id:
                result[key] = True
        return result

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_ids):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_ids: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision id from each parsed line will be looked up in the
            revision_ids filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        result = {}
        setdefault = result.setdefault
        for file_id, revision_id in \
            self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys():
            # once data is all ensured-consistent; then this is
            # if revision_id == version_id
            if revision_id in revision_ids:
                setdefault(file_id, set()).add(revision_id)
        return result

    def fileids_altered_by_revision_ids(self, revision_ids):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_revision_ids = set(revision_ids)
        w = self.get_inventory_weave()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_versions(
                    selected_revision_ids, pb=pb),
                selected_revision_ids)
        finally:
            pb.finished()
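
    # Shape of the returned mapping, with invented ids for illustration:
    #
    #   {'file-id-1': set(['rev-id-1']),
    #    'file-id-2': set(['rev-id-1', 'rev-id-3'])}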

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of lines produced by
        VersionedFile.get_lines().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        transaction = self.get_transaction()
        for file_id, revision_id, callable_data in desired_files:
            try:
                weave = self.weave_store.get_weave(file_id, transaction)
            except errors.NoSuchFile:
                raise errors.NoSuchIdInRepository(self, file_id)
            yield callable_data, weave.get_lines(revision_id)
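
    # A small usage sketch, assuming `repo` is a read-locked repository and
    # the (file_id, revision_id) pairs exist (ids invented for the example):
    #
    #   wanted = [('file-id-1', 'rev-id-1', 'a'),
    #             ('file-id-2', 'rev-id-2', 'b')]
    #   for identifier, line_list in repo.iter_files_bytes(wanted):
    #       text = ''.join(line_list)   # identifier is 'a' or 'b' here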

    def _generate_text_key_index(self):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        revision_graph = self.get_revision_graph_with_ghosts()
        ancestors = revision_graph.get_ancestors()
        text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents.", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents.", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            parent_entry = inv._byid.get(text_key[0], None)
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index
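
    # The resulting index maps each text key to its parent text keys, e.g.
    # (ids invented for the example):
    #
    #   {('file-id-1', 'rev-2'): [('file-id-1', 'rev-1')],
    #    ('file-id-1', 'rev-1'): ['null:']}
    #
    # where 'null:' is NULL_REVISION, used for keys with no parents.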

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed ?
        inv_w = self.get_inventory_weave()
        inv_w.enable_cache()

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if _files_pb is not None:
                _files_pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
        # We're done with the files_pb.  Note that it is finished by the
        # caller, just as it was created by the caller.
        del _files_pb

        # inventory
        yield ("inventory", None, revision_ids)
        inv_w.clear_cache()

        # signatures
        revisions_with_signatures = set()
        for rev_id in revision_ids:
            try:
                self.get_signature_text(rev_id)
            except errors.NoSuchRevision:
                # not signed.
                pass
            else:
                revisions_with_signatures.add(rev_id)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory_weave(self):
        return self.control_weaves.get_weave('inventory',
            self.get_transaction())

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by hash."""
        return self.deserialise_inventory(
            revision_id, self.get_inventory_xml(revision_id))

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        return self._serializer.read_inventory_from_string(xml, revision_id)

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        try:
            assert isinstance(revision_id, str), type(revision_id)
            iw = self.get_inventory_weave()
            return iw.get_text(revision_id)
        except IndexError:
            raise errors.HistoryMissing(self, 'inventory', revision_id)

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1

    def get_revision_graph(self, revision_id=None):
        """Return a dictionary containing the revision graph.

        NB: This method should not be used as it accesses the entire graph all
        at once, which is much more data than most operations should require.

        :param revision_id: The revision_id to get a graph from. If None, then
            the entire revision graph is returned. This is a deprecated mode of
            operation and will be removed in the future.
        :return: a dictionary of revision_id->revision_parents_list.
        """
        raise NotImplementedError(self.get_revision_graph)

    def get_revision_graph_with_ghosts(self, revision_ids=None):
        """Return a graph of the revisions with ghosts marked as applicable.

        :param revision_ids: an iterable of revisions to graph or None for all.
        :return: a Graph object with the graph reachable from revision_ids.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(3,
                "get_revision_graph_with_ghosts scales with size of history.")
        result = deprecated_graph.Graph()
        if not revision_ids:
            pending = set(self.all_revision_ids())
            required = set([])
        else:
            pending = set(revision_ids)
            # special case NULL_REVISION
            if _mod_revision.NULL_REVISION in pending:
                pending.remove(_mod_revision.NULL_REVISION)
            required = set(pending)
        done = set([])
        while len(pending):
            revision_id = pending.pop()
            try:
                rev = self.get_revision(revision_id)
            except errors.NoSuchRevision:
                if revision_id in required:
                    raise
                # a ghost
                result.add_ghost(revision_id)
                continue
            for parent_id in rev.parent_ids:
                # is this queued or done ?
                if (parent_id not in pending and
                    parent_id not in done):
                    # no, queue it.
                    pending.add(parent_id)
            result.add_node(revision_id, rev.parent_ids)
            done.add(revision_id)
        return result

    def _get_history_vf(self):
        """Get a versionedfile whose history graph reflects all revisions.

        For weave repositories, this is the inventory weave.
        """
        return self.get_inventory_weave()

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        if revision_id in (None, _mod_revision.NULL_REVISION):
            return
        next_id = revision_id
        versionedfile = self._get_history_vf()
        while True:
            yield next_id
            parents = versionedfile.get_parents(next_id)
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]

    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be None for the empty tree revision.
        """
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    @needs_read_lock
    def revision_trees(self, revision_ids):
        """Return Trees for revisions on this branch.

        `revision_id` may not be None or 'null:'"""
        assert None not in revision_ids
        assert _mod_revision.NULL_REVISION not in revision_ids
        texts = self.get_inventory_weave().get_texts(revision_ids)
        for text, revision_id in zip(texts, revision_ids):
            inv = self.deserialise_inventory(revision_id, text)
            yield RevisionTree(self, inv, revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        w = self.get_inventory_weave()
        candidates = w.get_ancestry(revision_id, topo_sorted)
        return [None] + candidates # self._eliminate_revisions_not_present(candidates)
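
    # Example shape of the result, oldest first after the leading None
    # (ids invented for the example):
    #
    #   [None, 'rev-id-1', 'rev-id-2', 'rev-id-3']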

    def pack(self):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long running call it's reasonable to
        implicitly lock for the user.
        """

    @needs_read_lock
    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            # TODO: jam 20060427 Write a test for this code path
            #       it had a bug in it, and was raising the wrong
            #       exception.
            raise errors.BzrError("%r is not present in revision %s" % (file, revision_id))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    def revision_parents(self, revision_id):
        return self.get_inventory_weave().parent_names(revision_id)

    def get_parents(self, revision_ids):
        """See StackedParentsProvider.get_parents"""
        parents_list = []
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                parents = []
            else:
                try:
                    parents = self.get_revision(revision_id).parent_ids
                except errors.NoSuchRevision:
                    parents = None
                else:
                    if len(parents) == 0:
                        parents = [_mod_revision.NULL_REVISION]
            parents_list.append(parents)
        return parents_list

    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            other_repository.bzrdir.transport.base !=
            self.bzrdir.transport.base):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)
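
    # Typical use, as a sketch: combine this repository with another and ask
    # graph questions such as heads() (`repo`, `other_repo` and the revision
    # ids are hypothetical):
    #
    #   g = repo.get_graph(other_repo)
    #   heads = g.heads(['rev-id-1', 'rev-id-2'])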

    def get_versioned_file_checker(self):
        """Return an object suitable for checking versioned files."""
        return VersionedFileChecker(self)
1665
def set_make_working_trees(self, new_value):
1666
"""Set the policy flag for making working trees when creating branches.
1668
This only applies to branches that use this repository.
1670
The default is 'True'.
1671
:param new_value: True to restore the default, False to disable making
1674
raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        return self._revision_store.has_signature(revision_id,
                                                  self.get_transaction())

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        return self._revision_store.get_signature_text(revision_id,
                                                       self.get_transaction())

    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)
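
    # Illustrative sketch: both unicode and str revision ids are accepted,
    # but anything outside ASCII raises NonAsciiRevisionId (the revision
    # ids are assumptions for the example):
    #
    #   repo._check_ascii_revisionid(u'rev-1', repo.add_revision)   # fine
    #   repo._check_ascii_revisionid(u'r\xe9v', repo.add_revision)  # raises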

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                    % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')
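
# Illustrative sketch: after the loops above, calling the forwarder, e.g.
# bzrlib.repository.RepositoryFormat7(), emits the DeprecationWarning and
# then fetches the name from its new home (bzrlib.repofmt.weaverepo); the
# forwarder returns whatever getattr finds there.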


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    repository.start_write_group()
    try:
        _install_revision(repository, rev, revision_tree)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
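
# Illustrative sketch, assuming `src` and `dst` are open repositories and
# `rev_id` exists in `src`; the write-group bracketing above commits on
# success and aborts on any error:
#
#   rev = src.get_revision(rev_id)
#   dst.lock_write()
#   try:
#       install_revision(dst, rev, src.revision_tree(rev_id))
#   finally:
#       dst.unlock()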


def _install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(None)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    # Add the texts that are not already present
    for path, ie in entries:
        w = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
        if ie.revision not in w:
            text_parents = []
            # FIXME: TODO: The following loop *may* be overlapping/duplicate
            # with InventoryEntry.find_previous_heads(). if it is, then there
            # is a latent bug here where the parents may have ancestors of each
            # other.
            for revision, tree in parent_trees.iteritems():
                if ie.file_id not in tree:
                    continue
                parent_id = tree.inventory[ie.file_id].revision
                if parent_id in text_parents:
                    continue
                text_parents.append(parent_id)
            vfile = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
            lines = revision_tree.get_file(ie.file_id).readlines()
            vfile.add_lines(rev.revision_id, text_parents, lines)
    try:
        # install the inventory
        repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout."""

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        super(MetaDirRepository, self).__init__(_format,
                                                a_bzrdir,
                                                control_files,
                                                _revision_store,
                                                control_store,
                                                text_store)
        dir_mode = self.control_files._dir_mode
        file_mode = self.control_files._file_mode

    @needs_read_lock
    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self.control_files._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self.control_files._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self.control_files.put_utf8('no-working-trees', '')

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self.control_files._transport.has('no-working-trees')
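
    # Illustrative sketch: the policy is a presence flag on disk, so
    # toggling it just creates or deletes the 'no-working-trees' file
    # (assuming `repo` is a writable MetaDirRepository):
    #
    #   repo.set_make_working_trees(False)
    #   repo.make_working_trees()  # => False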


class RepositoryFormatRegistry(registry.Registry):
    """Registry of RepositoryFormats."""

    def get(self, format_string):
        r = registry.Registry.get(self, format_string)
        if callable(r):
            r = r()
        return r


format_registry = RepositoryFormatRegistry()
"""Registry of formats, indexed by their identifying format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide three things:
     * An initialization routine to construct repository data on disk.
     * a format string which is used when the BzrDir supports versioned
       children.
     * an open routine which returns a Repository instance.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a dict by their format string for reference
    during opening. These should be subclasses of RepositoryFormat
    for consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every system load.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None

    def __str__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string)
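
    # Illustrative sketch, assuming `a_bzrdir` wraps an existing control
    # directory: the format file's full bytes are the registry key, so
    # lookup is a single read plus a dict access:
    #
    #   fmt = RepositoryFormat.find_format(a_bzrdir)
    #   repo = fmt.open(a_bzrdir)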

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def _get_control_store(self, repo_transport, control_files):
        """Return the control store for this repository."""
        raise NotImplementedError(self._get_control_store)

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def _get_revision_store(self, repo_transport, control_files):
        """Return the revision store object for this a_bzrdir."""
        raise NotImplementedError(self._get_revision_store)

    def _get_text_rev_store(self,
                            transport,
                            control_files,
                            name,
                            compressed=True,
                            prefixed=False,
                            serializer=None):
        """Common logic for getting a revision store for a repository.

        see self._get_revision_store for the subclass-overridable method to
        get the store for a repository.
        """
        from bzrlib.store.revision.text import TextRevisionStore
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        text_store = TextStore(transport.clone(name),
                               prefixed=prefixed,
                               compressed=compressed,
                               dir_mode=dir_mode,
                               file_mode=file_mode)
        _revision_store = TextRevisionStore(text_store, serializer)
        return _revision_store

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    _matchingbzrdir = bzrdir.BzrDirMetaFormat1()

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                                     'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        try:
            control_files._transport.mkdir_multi(dirs,
                    mode=control_files._dir_mode)
            for file, content in files:
                control_files.put(file, content)
            for file, content in utf8_files:
                control_files.put_utf8(file, content)
            if shared == True:
                control_files.put_utf8('shared-storage', '')
        finally:
            control_files.unlock()


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

# KEEP in sync with bzrdir.format_registry default, which controls the overall
# default control directory format
format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )
format_registry.default_key = 'Bazaar-NG Knit Repository Format 1'

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92.
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
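
# Illustrative sketch: register_lazy keys the exact format-string bytes
# (including any trailing newline) to a module/class pair, and
# RepositoryFormatRegistry.get() instantiates callables on lookup:
#
#   fmt = format_registry.get(
#       'Bazaar pack repository format 1 (needs bzr 0.92)\n')
#   fmt.get_format_description()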


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        raise NotImplementedError(self.copy_content)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        Returns the copied revision count and the failed revisions in a tuple:
        (copied, failures).
        """
        raise NotImplementedError(self.fetch)

    @needs_read_lock
    def missing_revision_ids(self, revision_id=None):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        """
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        # this may look like a no-op: it's not. It preserves the ordering
        # other_ids had while only returning the members from other_ids
        # that we've decided we need.
        return [rev_id for rev_id in source_ids if rev_id in result_set]
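
    # Illustrative sketch of the ordering-preserving filter used above:
    # membership is tested against a set for speed, while iteration order
    # comes from the topologically sorted list:
    #
    #   source_ids = ['A', 'B', 'C']
    #   result_set = set(['C', 'A'])
    #   [r for r in source_ids if r in result_set]  # => ['A', 'C']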

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        if source._serializer != target._serializer:
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This copies both the repository's revision data, and configuration information
        such as the make_working_trees setting.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # FIXME do not peek!
        if self.source.control_files._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.weave_store.copy_all_ids(
                    self.source.weave_store,
                    pb=pb,
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                pb.update('copying inventory', 0, 1)
                self.target.control_weaves.copy_multi(
                    self.source.control_weaves, ['inventory'],
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                self.target._revision_store.text_store.copy_all_ids(
                    self.source._revision_store.text_store,
                    pb=pb)
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_read_lock
    def missing_revision_ids(self, revision_id=None):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                         isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import KnitRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = KnitRepoFetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_read_lock
    def missing_revision_ids(self, revision_id=None):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids if rev_id in required_revisions]
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                         isinstance(target._format, RepositoryFormatPack))
        except AttributeError:
            return False
        return are_packs and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.repofmt.pack_repo import Packer
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        self.count_copied = 0
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            revision_ids = self.source.all_revision_ids()
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return 0
        else:
            try:
                revision_ids = self.missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts)
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
        packs = self.source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return pack.get_revision_count()
        else:
            return 0

    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            graph = self.source.get_graph()
            missing_revs = set()
            searcher = graph._make_breadth_first_searcher([revision_id])
            target_index = \
                self.target._pack_collection.revision_index.combined_index
            null_set = frozenset([_mod_revision.NULL_REVISION])
            while True:
                try:
                    next_revs = set(searcher.next())
                except StopIteration:
                    break
                next_revs.difference_update(null_set)
                target_keys = [(key,) for key in next_revs]
                have_revs = frozenset(node[1][0] for node in
                    target_index.iter_entries(target_keys))
                missing_revs.update(next_revs - have_revs)
                searcher.stop_searching_any(have_revs)
            return missing_revs
        elif revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        return [r for r in source_ids if (r not in target_ids)]


class InterModel1and2(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        else:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)


class InterKnit1and2(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit3
        try:
            from bzrlib.repofmt.knitrepo import (RepositoryFormatKnit1,
                RepositoryFormatKnit3)
            from bzrlib.repofmt.pack_repo import (RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3)
            return (isinstance(source._format,
                    (RepositoryFormatKnit1, RepositoryFormatKnitPack1)) and
                isinstance(target._format,
                    (RepositoryFormatKnit3, RepositoryFormatKnitPack3))
                )
        except AttributeError:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        source._ensure_real()
        real_source = source._real_repository
        # Is source's model compatible with target's model, and are they the
        # same format?  Currently we can only optimise fetching from an
        # identical model & format repo.
        assert not isinstance(real_source, remote.RemoteRepository), (
            "We don't support remote repos backed by remote repos yet.")
        return real_source._format == target._format

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import RemoteToOtherFetcher
        mutter("Using fetch logic to copy between %s(remote) and %s(%s)",
               self.source, self.target, self.target._format)
        # TODO: jam 20070210 This should be an assert, not a translate
        revision_id = osutils.safe_revision_id(revision_id)
        f = RemoteToOtherFetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        self._real_inter.fetch(revision_id=revision_id, pb=pb)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterRemoteToOther)
InterRepository.register_optimiser(InterOtherToRemote)
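
# Illustrative sketch: InterRepository.get() (inherited from
# bzrlib.inter.InterObject) walks the registered optimisers and picks the
# first whose is_compatible() accepts the pair, falling back to the
# generic InterRepository otherwise; `source_repo`/`target_repo` are
# assumptions for the example:
#
#   inter = InterRepository.get(source_repo, target_repo)
#   inter.fetch(revision_id=rev_id)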


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)
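
    # Illustrative sketch, assuming `repo` was opened from a metadir bzrdir
    # and `pb` is a ui progress bar; convert() replaces the repository in
    # place, keeping 'repository.backup' until the copy has succeeded:
    #
    #   converter = CopyConverter(target_format)
    #   converter.convert(repo, pb)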
# NB: the entity map below is reconstructed from context (the predefined
# XML entities the docstring of _unescape_xml refers to).
_unescape_map = {
    'apos': "'",
    'quot': '"',
    'amp': '&',
    'lt': '<',
    'gt': '>',
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
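
# Illustrative sketch: predefined entities come from _unescape_map, and
# numeric character references are decoded to UTF-8 bytes:
#
#   _unescape_xml('a &amp; b')  # => 'a & b'
#   _unescape_xml('&#233;')     # => '\xc3\xa9' (UTF-8 for e-acute)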


class VersionedFileChecker(object):

    def __init__(self, repository):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index()

    def calculate_file_version_parents(self, revision_id, file_id):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[(file_id, revision_id)]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        # strip the file_id, for the weave api
        return tuple([revision_id for file_id, revision_id in parent_keys])

    def check_file_version_parents(self, weave, file_id, planned_revisions):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        unused_versions = set()
        for num, revision_id in enumerate(planned_revisions):
            try:
                correct_parents = self.calculate_file_version_parents(
                    revision_id, file_id)
            except KeyError:
                # we were asked to investigate a non-existent version.
                unused_versions.add(revision_id)
            else:
                try:
                    knit_parents = tuple(weave.get_parents(revision_id))
                except errors.RevisionNotPresent:
                    knit_parents = None
                if correct_parents != knit_parents:
                    wrong_parents[revision_id] = (knit_parents, correct_parents)
        return wrong_parents, unused_versions
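
    # Illustrative sketch, assuming `repo` is locked for reading and
    # `weave` is the versioned file for `file_id`:
    #
    #   checker = repo.get_versioned_file_checker()
    #   wrong, unused = checker.check_file_version_parents(
    #       weave, file_id, planned_revisions)
    #   # `wrong` maps revision_id -> (stored_parents, correct_parents)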