# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from cStringIO import StringIO

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import time

from bzrlib import (
    bzrdir,
    debug,
    deprecated_graph,
    errors,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    lru_cache,
    osutils,
    revision as _mod_revision,
    symbol_versioning,
    tsort,
    ui,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.store.text import TextStore
from bzrlib.testament import Testament
from bzrlib.util import bencode
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import Inventory, InventoryDirectory, ROOT_ID
from bzrlib.symbol_versioning import (
    deprecated_method,
    )
from bzrlib.trace import mutter, mutter_callsite, note, warning


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            assert isinstance(committer, basestring), type(committer)
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
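
    # Illustrative sketch (not part of the original module): a caller would
    # normally drive a CommitBuilder through the Repository API rather than
    # constructing one directly.  Assuming `repo`, `branch`, `config`,
    # `parent_invs` and `tree` are already set up, the lifecycle looks
    # roughly like:
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   for path, ie, content_summary in entries_to_commit:
    #       builder.record_entry_contents(ie, parent_invs, path, tree,
    #           content_summary)
    #   builder.finish_inventory()
    #   new_revid = builder.commit('commit message')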

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished."""
        if self.new_inventory.root is None:
            symbol_versioning.warn('Root entry should be supplied to'
                ' record_entry_contents, as of bzr 0.10.',
                DeprecationWarning, stacklevel=2)
            self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
        self.new_inventory.revision_id = self._new_revision_id
        self.inv_sha1 = self.repository.add_inventory(
            self._new_revision_id,
            self.new_inventory,
            self.parents
            )

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            return (None, path, ie.file_id, ie)
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            return (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
        else:
            # common, unaltered
            return None

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded). change_delta is
            an inventory_delta change for this entry against the basis tree of
            the commit, or None if no change occurred against the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs, and
                # this masks when a change may have occurred against the basis,
                # so calculate if one happened.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                return delta, False
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate.
                return None, (ie.revision == self._new_revision_id)
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candiate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candiate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # repository.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candiate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        if kind == 'file':
            assert content_summary[2] is not None, \
                "Files must not have executable = None"
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            lines = tree.get_file(ie.file_id, path).readlines()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False
            ie.symlink_target = current_link_target
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        return self._get_delta(ie, basis_inv, path), True
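
    # Note (illustrative, not in the original source): the change_delta
    # returned above is an inventory-delta 4-tuple of
    # (old_path, new_path, file_id, inventory_entry); _get_delta produces
    # e.g. (None, 'doc/index.txt', 'index-file-id', ie) for an added entry,
    # and ('doc/old.txt', 'doc/new.txt', 'index-file-id', ie) for a changed
    # one.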

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        versionedfile = self.repository.weave_store.get_weave_or_empty(
            file_id, self.repository.get_transaction())
        # Don't change this to add_lines - add_lines_with_ghosts is cheaper
        # than add_lines, and allows committing when a parent is ghosted for
        # some reason.
        # Note: as we read the content directly from the tree, we know it's not
        # been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this is
        # not a guarantee of safety. What would be better is always checking
        # the content during test suite execution. RBC 20070912
        return versionedfile.add_lines_with_ghosts(
            self._new_revision_id, parents, new_lines,
            nostore_sha=nostore_sha, random_id=self.random_revid,
            check_content=False)[0:2]


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """


######################################################################
# Repositories


class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of Stores and a Transport, which respectively
    describe the disk data format and the way of accessing the (possibly
    remote) disk.
    """

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )
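
    # Illustrative note (not in the original source): this regex targets
    # serialized xml inventory lines of roughly this shape:
    #   <file file_id="some-file-id" ... revision="some-revision-id" .../>
    # pulling out each file id together with the revision that last altered
    # that file.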

    def abort_write_group(self):
        """Abort the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context and write group.')
        self._abort_write_group()
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory.
        """
        assert self.is_in_write_group()
        _mod_revision.check_not_reserved_id(revision_id)
        assert inv.revision_id is None or inv.revision_id == revision_id, \
            "Mismatch between inventory revision" \
            " id and insertion revid (%r, %r)" % (inv.revision_id, revision_id)
        assert inv.root is not None
        inv_lines = self._serialise_inventory_to_lines(inv)
        inv_vf = self.get_inventory_weave()
        return self._inventory_add_lines(inv_vf, revision_id, parents,
            inv_lines, check_content=False)

    def _inventory_add_lines(self, inv_vf, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        final_parents = []
        for parent in parents:
            if parent in inv_vf:
                final_parents.append(parent)
        return inv_vf.add_lines(revision_id, final_parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. if None, it will be looked
                    up in the inventory store
        :param config: If None no digital signature will be created.
                       If supplied its signature_needed method will be used
                       to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        if revision_id not in self.get_inventory_weave():
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.get_inventory_weave())
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        self._revision_store.add_revision(rev, self.get_transaction())

    def _add_revision_text(self, revision_id, text):
        revision = self._revision_store._serializer.read_revision_from_string(
            text)
        self._revision_store._add_revision(revision, StringIO(text),
                                           self.get_transaction())

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is deprecated because code should generally work on the graph
        reachable from a particular revision, and ignore any other revisions
        that might be present. There is no direct replacement method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        present.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        result = []
        for id in revision_ids:
            if self.has_revision(id):
                result.append(id)
        return result

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._revision_store = _revision_store
        # backwards compatibility
        self.weave_store = text_store
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        self.control_store = control_store
        self.control_weaves = control_store
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        self.base = control_files._transport.base

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self.control_files._transport.base ==
                other.control_files._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        """Return True if this object is locked."""
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        result = self.control_files.lock_write(token=token)
        self._refresh_data()
        return result

    def lock_read(self):
        self.control_files.lock_read()
        self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        if self.bzrdir.root_transport.listable():
            c, t = self._revision_store.total_size(self.get_transaction())
            result['revisions'] = c
            result['size'] = t
        return result
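
    # Illustrative example (not in the original source): for a small
    # repository, gather_stats('some-revid', committers=True) might return
    # something shaped like:
    #   {'committers': 2,
    #    'firstrev': (1199142458.0, 0), 'latestrev': (1199542458.0, 3600),
    #    'revisions': 120, 'size': 57344}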

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    def get_data_stream(self, revision_ids):
        raise NotImplementedError(self.get_data_stream)

    def get_data_stream_for_search(self, search_result):
        """Get a data stream that can be inserted to a repository.

        :param search_result: A bzrlib.graph.SearchResult selecting the
            revisions to get.
        :return: A data stream that can be inserted into a repository using
            insert_data_stream.
        """
        raise NotImplementedError(self.get_data_stream_for_search)

    def insert_data_stream(self, stream):
        """XXX What does this really do?

        Is it a substitute for fetch?
        Should it manage its own write group ?
        """
        for item_key, bytes in stream:
            if item_key[0] == 'file':
                (file_id,) = item_key[1:]
                knit = self.weave_store.get_weave_or_empty(
                    file_id, self.get_transaction())
            elif item_key == ('inventory',):
                knit = self.get_inventory_weave()
            elif item_key == ('revisions',):
                knit = self._revision_store.get_revision_file(
                    self.get_transaction())
            elif item_key == ('signatures',):
                knit = self._revision_store.get_signature_file(
                    self.get_transaction())
            else:
                raise errors.RepositoryDataStreamError(
                    "Unrecognised data stream key '%s'" % (item_key,))
            decoded_list = bencode.bdecode(bytes)
            format = decoded_list.pop(0)
            data_list = []
            knit_bytes = ''
            for version, options, parents, some_bytes in decoded_list:
                data_list.append((version, options, len(some_bytes), parents))
                knit_bytes += some_bytes
            buffer = StringIO(knit_bytes)
            def reader_func(count):
                if count is None:
                    return buffer.read()
                else:
                    return buffer.read(count)
            knit.insert_data_stream(
                (format, data_list, reader_func))

    @needs_read_lock
    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @deprecated_method(symbol_versioning.one_two)
    @needs_read_lock
    def missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        keys = self.search_missing_revision_ids(
            other, revision_id, find_ghosts).get_keys()
        other.lock_read()
        try:
            parents = other.get_graph().get_parent_map(keys)
        finally:
            other.unlock()
        return tsort.topo_sort(parents)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None all content is copied.
        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        """
        # fast path same-url fetch operations
        if self.has_same_location(source):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        inter = InterRepository.get(source, self)
        try:
            return inter.fetch(revision_id=revision_id, pb=pb, find_ghosts=find_ghosts)
        except NotImplementedError:
            raise errors.IncompatibleRepositories(source, self)

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
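
    # Illustrative sketch (not part of the original module): the write group
    # discipline these methods describe looks roughly like:
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   try:
    #       ...  # insert data, e.g. repo.add_inventory() / repo.add_revision()
    #   except:
    #       repo.abort_write_group()
    #       repo.unlock()
    #       raise
    #   else:
    #       repo.commit_write_group()
    #       repo.unlock()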

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    @needs_read_lock
    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        raise NotImplementedError(self.has_revisions)

        return self._revision_store.has_revision_id(revision_id,
                                                    self.get_transaction())

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        revs = self._revision_store.get_revisions(revision_ids,
                                                  self.get_transaction())
        for rev in revs:
            assert not isinstance(rev.revision_id, unicode)
            for parent_id in rev.parent_ids:
                assert not isinstance(parent_id, unicode)
        return revs

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        rev = self.get_revision(revision_id)
        rev_tmp = StringIO()
        # the current serializer..
        self._revision_store._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.
        """
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])
        trees = dict((t.get_revision_id(), t) for
                     t in self.revision_trees(required_trees))
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(None)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r]))[0]

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    def add_signature_text(self, revision_id, signature):
        self._revision_store.add_revision_signature_text(revision_id,
                                                         signature,
                                                         self.get_transaction())

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_ids = self.all_revision_ids()
        w = self.get_inventory_weave()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_versions(revision_ids, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, version_id in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            # RBC 20071114.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == version_id:
                result[key] = True
        return result

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_ids):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_ids: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision id from each parsed line will be looked up in the
            revision_ids filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        result = {}
        setdefault = result.setdefault
        for file_id, revision_id in \
            self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys():
            # once data is all ensured-consistent; then this is
            # if revision_id == version_id
            if revision_id in revision_ids:
                setdefault(file_id, set()).add(revision_id)
        return result

    def fileids_altered_by_revision_ids(self, revision_ids):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_revision_ids = set(revision_ids)
        w = self.get_inventory_weave()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_versions(
                    selected_revision_ids, pb=pb),
                selected_revision_ids)
        finally:
            pb.finished()
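
    # Illustrative example (not in the original source): the returned mapping
    # is keyed by file id, e.g. something shaped like:
    #   {'index-file-id': set(['rev-1', 'rev-3']),
    #    'readme-file-id': set(['rev-2'])}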

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of lines produced by
        VersionedFile.get_lines().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        transaction = self.get_transaction()
        for file_id, revision_id, callable_data in desired_files:
            try:
                weave = self.weave_store.get_weave(file_id, transaction)
            except errors.NoSuchFile:
                raise errors.NoSuchIdInRepository(self, file_id)
            yield callable_data, weave.get_lines(revision_id)
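
    # Usage sketch (illustrative, not in the original source):
    #
    #   wanted = [('file-id-1', 'rev-1', 'a'), ('file-id-2', 'rev-2', 'b')]
    #   for identifier, bytes_iterator in repo.iter_files_bytes(wanted):
    #       text = ''.join(bytes_iterator)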

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents.", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents.", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            parent_entry = inv._byid.get(text_key[0], None)
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents

        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator.  Ideally the caching would be done in fetch.py I think.  Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.get_inventory_weave()

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if _files_pb is not None:
                _files_pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
        # We're done with the files_pb.  Note that it is finished by the
        # caller, just as it was created by the caller.
        del _files_pb

        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        revisions_with_signatures = set()
        for rev_id in revision_ids:
            try:
                self.get_signature_text(rev_id)
            except errors.NoSuchRevision:
                # not signed.
                pass
            else:
                revisions_with_signatures.add(rev_id)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory_weave(self):
        return self.control_weaves.get_weave('inventory',
            self.get_transaction())

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :return: An iterator of inventories.
        """
        assert None not in revision_ids
        assert _mod_revision.NULL_REVISION not in revision_ids
        return self._iter_inventories(revision_ids)

    def _iter_inventories(self, revision_ids):
        """single-document based inventory iteration."""
        texts = self.get_inventory_weave().get_texts(revision_ids)
        for text, revision_id in zip(texts, revision_ids):
            yield self.deserialise_inventory(revision_id, text)

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        try:
            assert isinstance(revision_id, str), type(revision_id)
            iw = self.get_inventory_weave()
            return iw.get_text(revision_id)
        except IndexError:
            raise errors.HistoryMissing(self, 'inventory', revision_id)

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry
        """
        return self.get_revision(revision_id).inventory_sha1

    @deprecated_method(symbol_versioning.one_four)
    def get_revision_graph(self, revision_id=None):
        """Return a dictionary containing the revision graph.

        NB: This method should not be used as it accesses the entire graph all
        at once, which is much more data than most operations should require.

        :param revision_id: The revision_id to get a graph from. If None, then
            the entire revision graph is returned. This is a deprecated mode of
            operation and will be removed in the future.
        :return: a dictionary of revision_id->revision_parents_list.
        """
        raise NotImplementedError(self.get_revision_graph)

    @deprecated_method(symbol_versioning.one_three)
    def get_revision_graph_with_ghosts(self, revision_ids=None):
        """Return a graph of the revisions with ghosts marked as applicable.

        :param revision_ids: an iterable of revisions to graph or None for all.
        :return: a Graph object with the graph reachable from revision_ids.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(3,
                "get_revision_graph_with_ghosts scales with size of history.")
        result = deprecated_graph.Graph()
        if not revision_ids:
            pending = set(self.all_revision_ids())
            required = set([])
        else:
            pending = set(revision_ids)
            # special case NULL_REVISION
            if _mod_revision.NULL_REVISION in pending:
                pending.remove(_mod_revision.NULL_REVISION)
            required = set(pending)
        done = set([])
        while len(pending):
            revision_id = pending.pop()
            try:
                rev = self.get_revision(revision_id)
            except errors.NoSuchRevision:
                if revision_id in required:
                    raise
                # a ghost
                result.add_ghost(revision_id)
                continue
            for parent_id in rev.parent_ids:
                # is this queued or done ?
                if (parent_id not in pending and
                    parent_id not in done):
                    # no, queue it.
                    pending.add(parent_id)
            result.add_node(revision_id, rev.parent_ids)
            done.add(revision_id)
        return result

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            yield next_id
            # Note: The following line may raise KeyError in the event of
            # truncated history. We decided not to have a try:except:raise
            # RevisionNotPresent here until we see a use for it, because of the
            # cost in an inner loop that is by its very nature O(history).
            # Robert Collins 20080326
            parents = graph.get_parent_map([next_id])[next_id]
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]
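
    # Usage sketch (illustrative, not in the original source): walking the
    # lefthand (mainline) ancestry from a branch tip back to the start:
    #
    #   for revision_id in repo.iter_reverse_revision_history(tip_revid):
    #       print revision_id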

    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    @needs_read_lock
    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler
1658
def _refresh_data(self):
1659
"""Helper called from lock_* to ensure coherency with disk.
1661
The default implementation does nothing; it is however possible
1662
for repositories to maintain loaded indices across multiple locks
1663
by checking inside their implementation of this method to see
1664
whether their indices are still valid. This depends of course on
1665
the disk format being validatable in this manner.
1669
def revision_tree(self, revision_id):
1670
"""Return Tree for a revision on this branch.
1672
`revision_id` may be None for the empty tree revision.
1674
# TODO: refactor this to use an existing revision object
1675
# so we don't need to read it in twice.
1676
if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
1677
return RevisionTree(self, Inventory(root_id=None),
1678
_mod_revision.NULL_REVISION)
1680
inv = self.get_revision_inventory(revision_id)
1681
return RevisionTree(self, inv, revision_id)
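
    # Illustrative sketch only; assumes `repo` holds revision `rev_id` whose
    # tree contains a file at the path 'README':
    #
    #   tree = repo.revision_tree(rev_id)
    #   file_id = tree.inventory.path2id('README')
    #   text = tree.get_file(file_id).read()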

    @needs_read_lock
    def revision_trees(self, revision_ids):
        """Return Trees for the revisions in revision_ids.

        `revision_id` may not be None or 'null:'"""
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        w = self.get_inventory_weave()
        candidates = w.get_ancestry(revision_id, topo_sorted)
        return [None] + candidates # self._eliminate_revisions_not_present(candidates)

    def pack(self):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock: as this is a long running call, it is reasonable
        to lock implicitly for the user.
        """

    @needs_read_lock
    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            # TODO: jam 20060427 Write a test for this code path
            #       it had a bug in it, and was raising the wrong
            #       exception.
            raise errors.BzrError("%r is not present in revision %s" % (file, revision_id))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    def revision_parents(self, revision_id):
        return self.get_inventory_weave().parent_names(revision_id)

    @deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revision_ids):
        """See StackedParentsProvider.get_parents"""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, keys):
        """See graph._StackedParentsProvider.get_parent_map"""
        parent_map = {}
        for revision_id in keys:
            if revision_id == _mod_revision.NULL_REVISION:
                parent_map[revision_id] = ()
            else:
                try:
                    parent_id_list = self.get_revision(revision_id).parent_ids
                except errors.NoSuchRevision:
                    pass
                else:
                    if len(parent_id_list) == 0:
                        parent_ids = (_mod_revision.NULL_REVISION,)
                    else:
                        parent_ids = tuple(parent_id_list)
                    parent_map[revision_id] = parent_ids
        return parent_map
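
    # Illustrative sketch only: for a repository where revision 'B' has the
    # sole parent 'A' and 'A' itself has no parents,
    #
    #   repo.get_parent_map(['B', 'A', 'ghost', 'null:'])
    #
    # would return {'B': ('A',), 'A': ('null:',), 'null:': ()}.  'ghost' is
    # silently omitted because get_revision raised NoSuchRevision, and
    # parentless revisions are reported with NULL_REVISION as their parent.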

    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)
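
    # Illustrative sketch only; assumes `repo` and `other` are Repository
    # objects at different locations.  The returned graph consults both
    # repositories, so ancestry queries can span a fetch boundary:
    #
    #   graph_walker = repo.get_graph(other)
    #   parent_map = graph_walker.get_parent_map([rev_a, rev_b])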

    def _get_versioned_file_checker(self):
        """Return an object suitable for checking versioned files."""
        return _VersionedFileChecker(self)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result
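
    # Illustrative sketch only: if result_set is set(['B', 'C']) where 'C'
    # is a child of 'B' and 'B' is a child of 'A', the SearchResult is built
    # with start_keys=set(['C']) and exclude_keys=set(['A']), and covers
    # both revisions.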

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        return self._revision_store.has_signature(revision_id,
                                                  self.get_transaction())

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        return self._revision_store.get_signature_text(revision_id,
                                                       self.get_transaction())

    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
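
# Illustrative sketch only; assumes `src` and `dest` are write-locked
# repositories and `rev_ids` lists revision ids present in `src`:
#
#   triples = ((src.get_revision(r), src.revision_tree(r), None)
#              for r in rev_ids)
#   install_revisions(dest, triples, num_revisions=len(rev_ids))
#
# install_revisions manages the write group itself, aborting it if any
# individual _install_revision call raises.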


def _install_revision(repository, rev, revision_tree, signature):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(None)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    # Add the texts that are not already present
    for path, ie in entries:
        w = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
        if ie.revision not in w:
            text_parents = []
            # FIXME: TODO: The following loop *may* be overlapping/duplicate
            # with InventoryEntry.find_previous_heads(). if it is, then there
            # is a latent bug here where the parents may have ancestors of each
            # other.
            for revision, tree in parent_trees.iteritems():
                if ie.file_id not in tree:
                    continue
                parent_id = tree.inventory[ie.file_id].revision
                if parent_id in text_parents:
                    continue
                text_parents.append(parent_id)

            vfile = repository.weave_store.get_weave_or_empty(ie.file_id,
                repository.get_transaction())
            lines = revision_tree.get_file(ie.file_id).readlines()
            vfile.add_lines(rev.revision_id, text_parents, lines)
    try:
        # install the inventory
        repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout."""

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        super(MetaDirRepository, self).__init__(_format,
                                                a_bzrdir,
                                                control_files,
                                                _revision_store,
                                                control_store,
                                                text_store)
        dir_mode = self.control_files._dir_mode
        file_mode = self.control_files._file_mode

    @needs_read_lock
    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self.control_files._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self.control_files._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self.control_files.put_utf8('no-working-trees', '')

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self.control_files._transport.has('no-working-trees')


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files, _revision_store, control_store, text_store):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files, _revision_store, control_store, text_store)
        _revision_store.get_scope = self.get_transaction
        control_store.get_scope = self.get_transaction
        text_store.get_scope = self.get_transaction


class RepositoryFormatRegistry(registry.Registry):
    """Registry of RepositoryFormats."""

    def get(self, format_string):
        r = registry.Registry.get(self, format_string)
        if callable(r):
            r = r()
        return r


format_registry = RepositoryFormatRegistry()
"""Registry of formats, indexed by their identifying format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
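
# Illustrative sketch only; 'ExampleFormat' is a hypothetical
# RepositoryFormat subclass, not part of bzrlib.  Because the registry's
# get() calls any factory it finds, both registrations behave identically
# to callers:
#
#   format_registry.register('Example format 1\n', ExampleFormat())
#   format_registry.register('Example format 2\n', ExampleFormat)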


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide three things:
     * An initialization routine to construct repository data on disk.
     * a format string which is used when the BzrDir supports versioned
       children.
     * an open routine which returns a Repository instance.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a dict by their format string for reference
    during opening. These should be subclasses of RepositoryFormat
    for consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every system load.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None

    def __str__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def _get_control_store(self, repo_transport, control_files):
        """Return the control store for this repository."""
        raise NotImplementedError(self._get_control_store)

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    def _get_revision_store(self, repo_transport, control_files):
        """Return the revision store object for this a_bzrdir."""
        raise NotImplementedError(self._get_revision_store)

    def _get_text_rev_store(self,
                            transport,
                            control_files,
                            name,
                            compressed=True,
                            prefixed=False,
                            serializer=None):
        """Common logic for getting a revision store for a repository.

        see self._get_revision_store for the subclass-overridable method to
        get the store for a repository.
        """
        from bzrlib.store.revision.text import TextRevisionStore
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        text_store = TextStore(transport.clone(name),
                              prefixed=prefixed,
                              compressed=compressed,
                              dir_mode=dir_mode,
                              file_mode=file_mode)
        _revision_store = TextRevisionStore(text_store, serializer)
        return _revision_store

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False
    _matchingbzrdir = bzrdir.BzrDirMetaFormat1()

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        try:
            control_files._transport.mkdir_multi(dirs,
                    mode=control_files._dir_mode)
            for file, content in files:
                control_files.put(file, content)
            for file, content in utf8_files:
                control_files.put_utf8(file, content)
            if shared == True:
                control_files.put_utf8('shared-storage', '')
        finally:
            control_files.unlock()


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )

# Development formats.
# development 0 - stub to introduce development versioning scheme.
format_registry.register_lazy(
    "Bazaar development format 0 (needs bzr.dev from before 1.3)\n",
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment0',
    )
format_registry.register_lazy(
    ("Bazaar development format 0 with subtree support "
        "(needs bzr.dev from before 1.3)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment0Subtree',
    )
# 1.3->1.4 go below here
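
# Illustrative sketch only; the format string, module path and class name
# below are hypothetical, not part of bzrlib.  register_lazy defers the
# import until format_registry.get() is first called with this string:
#
#   format_registry.register_lazy(
#       'Example repository format (demo only)\n',
#       'example.repofmt.module',
#       'ExampleRepositoryFormat',
#       )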


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    def copy_content(self, revision_id=None):
        raise NotImplementedError(self.copy_content)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        Returns the copied revision count and the failed revisions in a tuple:
        (copied, failures).
        """
        raise NotImplementedError(self.fetch)

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        graph = self.source.get_graph()
        missing_revs = set()
        # ensure we don't pay silly lookup costs.
        revision_ids = frozenset(revision_ids)
        searcher = graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        while True:
            try:
                next_revs, ghosts = searcher.next_with_ghosts()
            except StopIteration:
                break
            if revision_ids.intersection(ghosts):
                absent_ids = set(revision_ids.intersection(ghosts))
                # If all absent_ids are present in target, no error is needed.
                absent_ids.difference_update(
                    self.target.has_revisions(absent_ids))
                if absent_ids:
                    raise errors.NoSuchRevision(self.source, absent_ids.pop())
            # we don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            next_revs = set(next_revs)
            # we always have NULL_REVISION present.
            have_revs = self.target.has_revisions(next_revs).union(null_set)
            missing_revs.update(next_revs - have_revs)
            searcher.stop_searching_any(have_revs)
        return searcher.get_result()

    @deprecated_method(symbol_versioning.one_two)
    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        """
        return list(self.search_missing_revision_ids(
            revision_id, find_ghosts).get_keys())

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
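
    # Illustrative sketch only; assumes `source` and `target` are Repository
    # objects.  InterRepository.get picks the most specific registered
    # optimiser whose is_compatible() accepts the pair:
    #
    #   inter = InterRepository.get(source, target)
    #   missing = inter.search_missing_revision_ids(find_ghosts=False)
    #   revision_ids = missing.get_keys()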

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation."""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        if source._serializer != target._serializer:
            return False
        return True


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This copies both the repository's revision data, and configuration
        information such as the make_working_trees setting.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
            RepositoryFormat5,
            RepositoryFormat6,
            RepositoryFormat7,
            )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplemented):
            pass
        # FIXME do not peek!
        if self.source.control_files._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.weave_store.copy_all_ids(
                    self.source.weave_store,
                    pb=pb,
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                pb.update('copying inventory', 0, 1)
                self.target.control_weaves.copy_multi(
                    self.source.control_weaves, ['inventory'],
                    from_transaction=self.source.get_transaction(),
                    to_transaction=self.target.get_transaction())
                self.target._revision_store.text_store.copy_all_ids(
                    self.source._revision_store.text_store,
                    pb=pb)
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import GenericRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = GenericRepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        # - RBC 20060209
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # since we used get_ancestry to determine source_ids, we are
            # assured all revisions referenced are present, as they are
            # installed in topological order; and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import KnitRepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = KnitRepoFetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # since we used get_ancestry to determine source_ids, we are
            # assured all revisions referenced are present, as they are
            # installed in topological order; and the tip revision was
            # validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
        except AttributeError:
            return False
        return are_packs and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.repofmt.pack_repo import Packer
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        self.count_copied = 0
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            revision_ids = self.source.all_revision_ids()
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.)
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.search_missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts).get_keys()
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
        packs = self.source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (pack.get_revision_count(), [])
        else:
            return (0, [])

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids[0] is None
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)


class InterModel1and2(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        else:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)


class InterKnit1and2(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit3
        try:
            from bzrlib.repofmt.knitrepo import (RepositoryFormatKnit1,
                RepositoryFormatKnit3)
            from bzrlib.repofmt.pack_repo import (
                RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3,
                RepositoryFormatPackDevelopment0,
                RepositoryFormatPackDevelopment0Subtree,
                )
            nosubtrees = (
                RepositoryFormatKnit1,
                RepositoryFormatKnitPack1,
                RepositoryFormatPackDevelopment0,
                )
            subtrees = (
                RepositoryFormatKnit3,
                RepositoryFormatKnitPack3,
                RepositoryFormatPackDevelopment0Subtree,
                )
            return (isinstance(source._format, nosubtrees) and
                isinstance(target._format, subtrees))
        except AttributeError:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterDifferingSerializer(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        # Ideally, we'd support fetching if the source had no tree references
        # even if it supported them...
        if (getattr(source._format, 'supports_tree_reference', False) and
            not getattr(target._format, 'supports_tree_reference', False)):
            return False
        return True

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        def revisions_iterator():
            for current_revision_id in revision_ids:
                revision = self.source.get_revision(current_revision_id)
                tree = self.source.revision_tree(current_revision_id)
                try:
                    signature = self.source.get_signature_text(
                        current_revision_id)
                except errors.NoSuchRevision:
                    signature = None
                yield revision, tree, signature
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            my_pb = None
        try:
            install_revisions(self.target, revisions_iterator(),
                              len(revision_ids), pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        # Is source's model compatible with target's model?
        source._ensure_real()
        real_source = source._real_repository
        assert not isinstance(real_source, remote.RemoteRepository), (
            "We don't support remote repos backed by remote repos yet.")
        return InterRepository._same_model(real_source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import RemoteToOtherFetcher
        mutter("Using fetch logic to copy between %s(remote) and %s(%s)",
               self.source, self.target, self.target._format)
        # TODO: jam 20070210 This should be an assert, not a translate
        revision_id = osutils.safe_revision_id(revision_id)
        f = RemoteToOtherFetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterRemoteToOther)
InterRepository.register_optimiser(InterOtherToRemote)


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of to_convert, giving feedback via pb.

        :param to_convert: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
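
# Illustrative sketch only:
#
#   _unescape_xml('a &amp; b &lt; c &#65;')
#
# returns 'a & b < c A': named entities come from _unescape_map, while
# numeric character references are decoded via unichr().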


class _VersionedFileChecker(object):

    def __init__(self, repository):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index()

    def calculate_file_version_parents(self, revision_id, file_id):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[(file_id, revision_id)]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        # strip the file_id, for the weave api
        return tuple([revision_id for file_id, revision_id in parent_keys])

    def check_file_version_parents(self, weave, file_id):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        unused_versions = set()
        versions = weave.versions()
        parent_map = weave.get_parent_map(versions)
        for num, revision_id in enumerate(versions):
            try:
                correct_parents = self.calculate_file_version_parents(
                    revision_id, file_id)
            except KeyError:
                # The version is not part of the used keys.
                unused_versions.add(revision_id)
            else:
                try:
                    knit_parents = tuple(parent_map[revision_id])
                except errors.RevisionNotPresent:
                    knit_parents = None
                if correct_parents != knit_parents:
                    wrong_parents[revision_id] = (knit_parents, correct_parents)
        return wrong_parents, unused_versions


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph