# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import cStringIO
import time

from bzrlib import (
    bzrdir,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    lru_cache,
    osutils,
    revision as _mod_revision,
    tsort,
    ui,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import Inventory, InventoryDirectory, ROOT_ID
from bzrlib import registry
from bzrlib.symbol_versioning import (
    deprecated_method,
    one_two,
    )
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initialize a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
            timestamp=self._timestamp,
            timezone=self._timezone,
            committer=self._committer,
            message=message,
            inventory_sha1=self.inv_sha1,
            revision_id=self._new_revision_id,
            properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
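
    # Illustrative usage sketch (editorial addition, not part of the original
    # source): the expected CommitBuilder lifecycle, assuming a write-locked
    # repository and a hypothetical `tree` supplying entries:
    #
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   for path, ie in entries:
    #       builder.record_entry_contents(ie, parent_invs, path, tree,
    #           tree.path_content_summary(path))
    #   builder.finish_inventory()
    #   rev_id = builder.commit('commit message')
    #
    # get_commit_builder() opens the write group; commit() closes it via
    # repository.commit_write_group().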

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished."""
        if self.new_inventory.root is None:
            raise AssertionError('Root entry should be supplied to'
                ' record_entry_contents, as of bzr 0.10.')
            self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
        self.new_inventory.revision_id = self._new_revision_id
        self.inv_sha1 = self.repository.add_inventory(
            self._new_revision_id,
            self.new_inventory,
            self.parents
            )

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id.
        ie.revision = self._new_revision_id

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta
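
    # Illustrative example (editorial addition): inventory delta items are
    # (old_path, new_path, file_id, new_entry) 4-tuples, as built by
    # _get_delta and record_delete above, e.g.:
    #   (None, 'a.txt', 'a-id', <entry>)     # added
    #   ('a.txt', 'b.txt', 'a-id', <entry>)  # renamed and/or modified
    #   ('a.txt', None, 'a-id', None)        # deleted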

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
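
    # Hedged protocol sketch (editorial addition, not original source): a
    # caller that wants get_basis_delta() to be valid must announce deletes
    # up front:
    #
    #   builder.will_record_deletes()
    #   builder.record_delete('doomed.txt', 'doomed-file-id')
    #   # ...record_entry_contents() for the surviving entries...
    #   delta = builder.get_basis_delta()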

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r)." % (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                lines = file_obj.readlines()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        return self._get_delta(ie, basis_inv, path), True, fingerprint

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        # Note: as we read the content directly from the tree, we know it's not
        # been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this is
        # not a guarantee of safety. What would be better is always checking
        # the content during test suite execution. RBC 20070912
        parent_keys = tuple((file_id, parent) for parent in parents)
        return self.repository.texts.add_lines(
            (file_id, self._new_revision_id), parent_keys, new_lines,
            nostore_sha=nostore_sha, random_id=self.random_revid,
            check_content=False)[0:2]
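
    # Editorial note (illustrative): texts are addressed by 2-tuple keys, so
    # the call above stores under (file_id, self._new_revision_id) with one
    # parent key per inventory head, e.g. ('file-id', 'rev-1'). Passing a
    # nostore_sha makes add_lines raise ExistingContent when the new text's
    # sha1 matches it, which record_entry_contents uses to carry over
    # unchanged files instead of storing a new node.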


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """


######################################################################
# Repositories

class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories and texts attributes) and a Transport,
    which respectively provide byte storage and a means to access the (possibly
    remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). We use this interface because it allows low
    friction with the underlying code that implements disk indices, network
    encoding and other parts of bzrlib.
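
    For example (illustrative, not from the original docstring): the revision
    'rev-1' is addressed as the key ('rev-1',) in the revisions, inventories
    and signatures stores, while its copy of a file is addressed as
    ('file-id', 'rev-1') in the texts store.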

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures.  The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )

    def abort_write_group(self, suppress_errors=False):
        """Abort the contents accrued within the current write group.

        :param suppress_errors: if true, abort_write_group will catch and log
            unexpected errors that happen during the abort, rather than
            allowing them to propagate.  Defaults to False.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context and write group.')
        try:
            self._abort_write_group()
        except Exception, exc:
            self._write_group = None
            if not suppress_errors:
                raise
            mutter('abort_write_group failed')
            log_exception_quietly()
            note('bzr: ERROR (ignored): %s', exc)
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
            is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
                               parents):
        """Add a new inventory expressed as a delta against another revision.

        :param basis_revision_id: The inventory id the delta was created
            against. (This does not have to be a direct parent.)
        :param delta: The inventory delta (see Inventory.apply_delta for
            details).
        :param new_revision_id: The revision id that the inventory is being
            added for.
        :param parents: The revision ids of the parents that revision_id is
            known to have and are in the repository already. These are supplied
            for repositories that depend on the inventory graph for revision
            graph access, as well as for those that pun ancestry with delta
            compression.

        :returns: (validator, new_inv)
            The validator (which is a sha1 digest, though what is sha'd is
            repository format specific) of the serialized inventory, and the
            resulting inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(new_revision_id)
        basis_tree = self.revision_tree(basis_revision_id)
        basis_tree.lock_read()
        try:
            # Note that this mutates the inventory of basis_tree, which not all
            # inventory implementations may support: A better idiom would be to
            # return a new inventory, but as there is no revision tree cache in
            # repository this is safe for now - RBC 20081013
            basis_inv = basis_tree.inventory
            basis_inv.apply_delta(delta)
            basis_inv.revision_id = new_revision_id
            return (self.add_inventory(new_revision_id, basis_inv, parents),
                    basis_inv)
        finally:
            basis_tree.unlock()
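
    # Illustrative call sketch (editorial addition, hypothetical ids):
    #   delta = [(None, 'new.txt', 'new-id', new_entry)]
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       'basis-rev-id', delta, 'new-rev-id', ['basis-rev-id'])
    # This must run inside a write group; the delta format is that of
    # Inventory.apply_delta.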

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        return self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
            up in the inventory store.
        :param config: If None no digital signature will be created.
            If supplied its signature_needed method will be used
            to determine if a signature should be made.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        if config is not None and config.signature_needed():
            if inv is None:
                inv = self.get_inventory(revision_id)
            plaintext = Testament(rev, inv).as_short_text()
            self.store_revision_signature(
                gpg.GPGStrategy(config), plaintext, revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        This is conceptually deprecated because code should generally work on
        the graph reachable from a particular revision, and ignore any other
        revisions that might be present.  There is no direct replacement
        method.
        """
        if 'evil' in debug.debug_flags:
            mutter_callsite(2, "all_revision_ids is linear with history.")
        return self._all_revision_ids()

    def _all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        offer.
        """
        raise NotImplementedError(self._all_revision_ids)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.
        """
        self.control_files.break_lock()

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.

        In the future we will have a single api for all stores for
        getting file texts, inventories and revisions, then
        this construct will accept instances of those things.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        #
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        locked = self.is_locked()
        result = self.control_files.lock_write(token=token)
        for repo in self._fallback_repositories:
            # Writes don't affect fallback repos
            repo.lock_read()
        if not locked:
            self._refresh_data()
        return result

    def lock_read(self):
        locked = self.is_locked()
        self.control_files.lock_read()
        for repo in self._fallback_repositories:
            repo.lock_read()
        if not locked:
            self._refresh_data()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to define __len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
        return result
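
    # Illustrative result (editorial addition, hypothetical values): for a
    # repository with two commits,
    #   repo.gather_stats(revid='rev-2', committers=True)
    # might return something like:
    #   {'committers': 1,
    #    'firstrev': (1229000000.0, 0),
    #    'latestrev': (1229000100.0, 0),
    #    'revisions': 2}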

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    @needs_read_lock
    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @deprecated_method(one_two)
    @needs_read_lock
    def missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        keys = self.search_missing_revision_ids(
            other, revision_id, find_ghosts).get_keys()
        other.lock_read()
        try:
            parents = other.get_graph().get_parent_map(keys)
        finally:
            other.unlock()
        return tsort.topo_sort(parents)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()

    def copy_content_into(self, destination, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id)

    def commit_write_group(self):
        """Commit the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context %r and '
                'write group %r.' %
                (self.get_transaction(), self._write_group))
        self._commit_write_group()
        self._write_group = None

    def _commit_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called before the write group is considered to be
        finished and should ensure that all data handed to the repository
        for writing during the write group is safely committed (to the
        extent possible considering file system caching etc).
        """

    def suspend_write_group(self):
        raise errors.UnsuspendableWriteGroup(self)

    def refresh_data(self):
        """Re-read any data needed to synchronise with disk.

        This method is intended to be called after another repository instance
        (such as one used by a smart server) has inserted data into the
        repository. It may not be called during a write group, but may be
        called at any other time.
        """
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not refresh_data while in a write group.")
        self._refresh_data()

    def resume_write_group(self, tokens):
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._resume_write_group(tokens)
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()

    def _resume_write_group(self, tokens):
        raise errors.UnsuspendableWriteGroup(self)

    def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target.  Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy.  This
            allows copying multiple heads at once.  Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        if self.has_same_location(source) and fetch_spec is None:
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        # if there is no specific appropriate InterRepository, this will get
        # the InterRepository base class, which raises an
        # IncompatibleRepositories when asked to fetch.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
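
    # Illustrative usage (editorial addition, hypothetical ids): mirror
    # everything reachable from a tip into a local repository, outside any
    # write group:
    #   local_repo.fetch(remote_repo, revision_id='tip-rev-id')
    # or copy several heads at once by passing a fetch_spec instead of
    # revision_id.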

    def create_bundle(self, target, base, fileobj, format=None):
        return serializer.write_bundle(self, target, base, fileobj, format)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        if self.control_files._lock_count == 0:
            self._inventory_entry_cache.clear()
        for repo in self._fallback_repositories:
            repo.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
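
    # Hedged lifecycle sketch (editorial addition, not part of the original
    # source): the expected pattern for inserting data is roughly:
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           # ...add_inventory(), add_revision(), texts.add_lines()...
    #           repo.commit_write_group()
    #       except:
    #           repo.abort_write_group()
    #           raise
    #   finally:
    #       repo.unlock()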

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    @needs_read_lock
    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendent repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result
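
    # Illustrative example (editorial addition, hypothetical ids): if only
    # 'rev-1' is stored,
    #   repo.has_revisions(['rev-1', 'rev-2']) == set(['rev-1'])
    # and the NULL_REVISION sentinel is always reported as present.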

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    @needs_read_lock
    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        revs = {}
        for record in stream:
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self, record.key[0])
            text = record.get_bytes_as('fulltext')
            rev = self._serializer.read_revision_from_string(text)
            revs[record.key[0]] = rev
        return [revs[revid] for revid in revision_ids]

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        rev = self.get_revision(revision_id)
        rev_tmp = cStringIO.StringIO()
        # the current serializer.
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.
        """
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])
        trees = dict((t.get_revision_id(), t) for
                     t in self.revision_trees(required_trees))
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(_mod_revision.NULL_REVISION)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r]))[0]

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_ids):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_ids: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision id from each parsed line will be looked up in the
            revision_ids filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
            line_iterator).iterkeys())
        # Note that revision_ids are revision keys.
        parent_maps = self.revisions.get_parent_map(revision_ids)
        parents = set()
        map(parents.update, parent_maps.itervalues())
        parents.difference_update(revision_ids)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parents)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revision_ids: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples.
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('fulltext')
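
    # Illustrative usage (editorial addition, hypothetical ids):
    #   wanted = [('file-id', 'rev-1', 'any-opaque-token')]
    #   for identifier, bytes_iterator in repo.iter_files_bytes(wanted):
    #       text = ''.join(bytes_iterator)
    # identifiers come back exactly as supplied; order is not guaranteed.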

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            parent_entry = inv._byid.get(text_key[0], None)
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents
        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator.  Ideally the caching would be done in fetch.py I think.  Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if _files_pb is not None:
                _files_pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
        # We're done with the files_pb.  Note that it is finished by the
        # caller, just as it was created by the caller.
        del _files_pb

        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)
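
    # Illustrative sketch (hypothetical `repo`, `revs` and helper functions):
    # a fetch-like caller can insert items in exactly the order they are
    # yielded - texts first, then inventories, signatures and revisions.
    #
    #   for knit_kind, file_id, versions in repo.item_keys_introduced_by(revs):
    #       if knit_kind == 'file':
    #           copy_texts(file_id, versions)    # hypothetical helper
    #       else:
    #           copy_knit(knit_kind, versions)   # hypothetical helper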

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids)

    def _iter_inventories(self, revision_ids):
        """single-document based inventory iteration."""
        for text, revision_id in self._iter_inventory_xmls(revision_ids):
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids):
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                text_chunks[record.key] = record.get_bytes_as('chunked')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            chunks = text_chunks.pop(key)
            yield ''.join(chunks), key[-1]

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id])
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry
        """
        return self.get_revision(revision_id).inventory_sha1

    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            yield next_id
            # Note: The following line may raise KeyError in the event of
            # truncated history. We decided not to have a try:except:raise
            # RevisionNotPresent here until we see a use for it, because of the
            # cost in an inner loop that is by its very nature O(history).
            # Robert Collins 20080326
            parents = graph.get_parent_map([next_id])[next_id]
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]
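
    # Illustrative sketch (hypothetical `repo` and `tip`): walking the
    # lefthand history newest-first.
    #
    #   for revision_id in repo.iter_reverse_revision_history(tip):
    #       ...  # yields tip, then its leftmost parent, and so on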

    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions on this branch.

        `revision_id` may not be None or 'null:'"""
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        if topo_sorted:
            parent_map = graph.get_parent_map(keys)
            keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)
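
    # For illustration only (made-up ids): the returned list always starts
    # with None, e.g.
    #
    #   repo.get_ancestry('rev-3') => [None, 'rev-1', 'rev-2', 'rev-3']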

    def pack(self):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long-running call it is reasonable to
        implicitly lock for the user.
        """

    @needs_read_lock
    @deprecated_method(one_six)
    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            # TODO: jam 20060427 Write a test for this code path
            #       it had a bug in it, and was raising the wrong
            #       exception.
            raise errors.BzrError("%r is not present in revision %s" % (file, revision_id))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    @deprecated_method(one_one)
    def get_parents(self, revision_ids):
        """See StackedParentsProvider.get_parents"""
        parent_map = self.get_parent_map(revision_ids)
        return [parent_map.get(r, None) for r in revision_ids]

    def get_parent_map(self, revision_ids):
        """See graph._StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple(parent_revid
                    for (parent_revid,) in parent_keys)
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result
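
    # For illustration only (made-up ids): ghosts are simply absent from the
    # result, and a revision with no parents maps to (NULL_REVISION,), e.g.
    #
    #   repo.get_parent_map(['rev-2', 'a-ghost']) => {'rev-2': ('rev-1',)}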

    def _make_parents_provider(self):
        return self

    def get_graph(self, other_repository=None):
        """Return the graph walker for this repository format"""
        parents_provider = self._make_parents_provider()
        if (other_repository is not None and
            not self.has_same_location(other_repository)):
            parents_provider = graph._StackedParentsProvider(
                [parents_provider, other_repository._make_parents_provider()])
        return graph.Graph(parents_provider)

    def _get_versioned_file_checker(self, text_key_references=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            calculated.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = graph.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')

    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
            will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                    % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
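
# Illustrative sketch (hypothetical `source` and `target` repositories and a
# made-up revision id): copying a single revision with its tree and no
# signature.
#
#   rev = source.get_revision('rev-1')
#   tree = source.revision_tree('rev-1')
#   target.lock_write()
#   try:
#       install_revisions(target, [(rev, tree, None)])
#   finally:
#       target.unlock()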


def _install_revision(repository, rev, revision_tree, signature):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
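
# For illustration only: a format can be looked up by its exact on-disk
# format string, including the trailing newline; the pack formats registered
# further down in this module are examples.
#
#   fmt = format_registry.get(
#       'Bazaar pack repository format 1 (needs bzr 0.92)\n')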


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas ?
    # (E.g. knits, where the knit deltas can be transplanted intact.)
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')
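
    # Illustrative sketch (hypothetical `a_bzrdir`): find_format() reads the
    # 'format' file under .bzr/repository and resolves it via format_registry.
    #
    #   format = RepositoryFormat.find_format(a_bzrdir)
    #   repo = format.open(a_bzrdir)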

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()


# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directories'
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
)
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
)

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# 1.7->1.8 go below here
format_registry.register_lazy(
    "Bazaar development format 2 (needs bzr.dev from before 1.8)\n",
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2',
    )
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _walk_to_common_revisions_batch_size = 50
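
    # Illustrative sketch (hypothetical `source` and `target` repositories):
    # callers obtain the best registered optimiser via InterObject.get.
    #
    #   inter = InterRepository.get(source, target)
    #   result = inter.search_missing_revision_ids(find_ghosts=True)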

    _optimisers = []
    """The available optimised InterRepository types."""

    def __init__(self, source, target):
        InterObject.__init__(self, source, target)
        # These two attributes may be overridden by e.g. InterOtherToRemote to
        # provide a faster implementation.
        self.target_get_graph = self.target.get_graph
        self.target_get_parent_map = self.target.get_parent_map

    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
            provided a default one will be created.
        :return: None.
        """
        from bzrlib.fetch import RepoFetcher
        f = RepoFetcher(to_repository=self.target,
                       from_repository=self.source,
                       last_revision=revision_id,
                       fetch_spec=fetch_spec,
                       pb=pb, find_ghosts=find_ghosts)

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target_get_graph()
        revision_ids = frozenset(revision_ids)
        # Fast path for the case where all the revisions are already in the
        # target repo.
        # (Although this does incur an extra round trip for the
        # fairly common case where the target doesn't already have the revision
        # we're pushing.)
        if set(target_graph.get_parent_map(revision_ids)) == revision_ids:
            return graph.SearchResult(revision_ids, set(), 0, set())
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        return searcher.get_result()

    @deprecated_method(one_two)
    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        """
        return list(self.search_missing_revision_ids(
            revision_id, find_ghosts).get_keys())

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
                            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories, e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
                RepositoryFormat5,
                RepositoryFormat6,
                RepositoryFormat7,
                )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except (errors.RepositoryUpgradeRequired, NotImplementedError):
            pass
        # FIXME do not peek!
        if self.source._transport.listable():
            pb = ui.ui_factory.nested_progress_bar()
            try:
                self.target.texts.insert_record_stream(
                    self.source.texts.get_record_stream(
                        self.source.texts.keys(), 'topological', False))
                pb.update('copying inventory', 0, 1)
                self.target.inventories.insert_record_stream(
                    self.source.inventories.get_record_stream(
                        self.source.inventories.keys(), 'topological', False))
                self.target.signatures.insert_record_stream(
                    self.source.signatures.get_record_stream(
                        self.source.signatures.keys(),
                        'unordered', True))
                self.target.revisions.insert_record_stream(
                    self.source.revisions.get_record_stream(
                        self.source.revisions.keys(),
                        'topological', True))
            finally:
                pb.finished()
        else:
            self.target.fetch(self.source, revision_id=revision_id)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        # - RBC 20060209
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterKnitRepo(InterSameDataRepository):
    """Optimised code paths between Knit based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Knit formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
        try:
            are_knits = (isinstance(source._format, RepositoryFormatKnit) and
                isinstance(target._format, RepositoryFormatKnit))
        except AttributeError:
            return False
        return are_knits and InterRepository._same_model(source, target)

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of whats available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
        except AttributeError:
            return False
        return are_packs and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if (len(self.source._fallback_repositories) > 0 or
            len(self.target._fallback_repositories) > 0):
            # The pack layer is not aware of fallback repositories, so when
            # fetching from a stacked repository or into a stacked repository
            # we use the generic fetch logic which uses the VersionedFiles
            # attributes on repository.
            from bzrlib.fetch import RepoFetcher
            fetcher = RepoFetcher(self.target, self.source, revision_id,
                    pb, find_ghosts, fetch_spec=fetch_spec)
            # The generic fetcher has already copied everything; return early
            # rather than falling through to the pack-specific path and doing
            # the work a second time.
            return fetcher.count_copied, fetcher.failed_revisions
        if fetch_spec is not None:
            if len(list(fetch_spec.heads)) != 1:
                raise AssertionError(
                    "InterPackRepo.fetch doesn't support "
                    "fetching multiple heads yet.")
            revision_id = fetch_spec.heads[0]
            fetch_spec = None
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            source_revision_ids = frozenset(self.source.all_revision_ids())
            revision_ids = source_revision_ids - \
                frozenset(self.target_get_parent_map(source_revision_ids))
            revision_keys = [(revid,) for revid in revision_ids]
            target_pack_collection = self._get_target_pack_collection()
            index = target_pack_collection.revision_index.combined_index
            present_revision_ids = set(item[1][0] for item in
                index.iter_entries(revision_keys))
            revision_ids = set(revision_ids) - present_revision_ids
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.search_missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts).get_keys()
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
            if len(revision_ids) == 0:
                return (0, [])
        return self._pack(self.source, self.target, revision_ids)

    def _pack(self, source, target, revision_ids):
        from bzrlib.repofmt.pack_repo import Packer
        target_pack_collection = self._get_target_pack_collection()
        packs = source._pack_collection.all_packs()
        pack = Packer(target_pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            target_pack_collection._save_pack_names()
            copied_revs = pack.get_revision_count()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self._autopack()
            return (copied_revs, [])
        else:
            return (0, [])

    def _autopack(self):
        self.target._pack_collection.autopack()

    def _get_target_pack_collection(self):
        return self.target._pack_collection

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            # Find ghosts: search for revisions pointing from one repository to
            # the other, and vice versa, anywhere in the history of revision_id.
            graph = self.target_get_graph(other_repository=self.source)
            searcher = graph._make_breadth_first_searcher([revision_id])
            found_ids = set()
            while True:
                try:
                    next_revs, ghosts = searcher.next_with_ghosts()
                except StopIteration:
                    break
                if revision_id in ghosts:
                    raise errors.NoSuchRevision(self.source, revision_id)
                found_ids.update(next_revs)
                found_ids.update(ghosts)
            found_ids = frozenset(found_ids)
            # Double query here: should be able to avoid this by changing the
            # graph api further.
            result_set = found_ids - frozenset(
                self.target_get_parent_map(found_ids))
        else:
            source_ids = self.source.all_revision_ids()
            # source_ids is the worst possible case we may need to pull.
            # now we want to filter source_ids against what we actually
            # have in target, but don't try to check for existence where we know
            # we do not have a revision as that would be pointless.
            target_ids = set(self.target.all_revision_ids())
            result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)


class InterDifferingSerializer(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        # Ideally, we'd support fetching if the source had no tree references
        # even if it supported them...
        if (getattr(source._format, 'supports_tree_reference', False) and
            not getattr(target._format, 'supports_tree_reference', False)):
            return False
        return True

    def _get_delta_for_revision(self, tree, parent_ids, basis_id, cache):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        possible_trees = [(parent_id, cache[parent_id])
                          for parent_id in parent_ids
                           if parent_id in cache]
        if len(possible_trees) == 0:
            # There either aren't any parents, or the parents aren't in the
            # cache, so just use the last converted tree
            possible_trees.append((basis_id, cache[basis_id]))
        deltas = []
        for basis_id, basis_tree in possible_trees:
            delta = tree.inventory._make_delta(basis_tree.inventory)
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
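
    # For illustration only (made-up data): with candidate deltas
    # [(12, 'rev-a', delta_a), (3, 'rev-b', delta_b)], the sort above puts
    # the 3-entry delta first, so ('rev-b', delta_b) is returned.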

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        for tree in self.source.revision_trees(revision_ids):
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           basis_id, cache)
            # Find text entries that need to be copied
            for old_path, new_path, file_id, entry in delta:
                if new_path is not None:
                    if not (new_path or self.target.supports_rich_root()):
                        # We don't copy the text for the root node unless the
                        # target supports_rich_root.
                        continue
                    text_keys.add((file_id, entry.revision))
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressBar
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update('Transferring revisions', offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.target.abort_write_group()
                raise
            else:
                self.target.commit_write_group()
        pb.update('Transferring revisions', len(revision_ids),
                  len(revision_ids))

    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            raise AssertionError("Not implemented yet...")
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to fetch()")
            my_pb = None
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree
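

# Illustrative sketch (not part of bzrlib's API): the basis-selection idea
# used by _get_delta_for_revision above, shown on plain data. Each candidate
# basis yields an inventory delta; the candidate producing the shortest
# delta wins, minimising the work done per converted revision. The helper
# name and its input shape are hypothetical.
def _example_pick_smallest_delta(candidate_deltas):
    """Hypothetical helper: given (basis_id, delta) pairs, where each delta
    is a list of change tuples, return the pair with the fewest changes.
    """
    sized = sorted((len(delta), basis_id, delta)
                   for basis_id, delta in candidate_deltas)
    return sized[0][1], sized[0][2]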


class InterOtherToRemote(InterRepository):
    """An InterRepository that simply delegates to the 'real' InterRepository
    calculated for (source, target._real_repository).
    """

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)
            # Make _real_inter use the RemoteRepository for get_parent_map
            self._real_inter.target_get_graph = self.target.get_graph
            self._real_inter.target_get_parent_map = self.target.get_parent_map

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        return InterRepository._same_model(source, target)

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.source._ensure_real()
            real_source = self.source._real_repository
            self._real_inter = InterRepository.get(real_source, self.target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterPackToRemotePack(InterPackRepo):
    """A specialisation of InterPackRepo for a target that is a
    RemoteRepository.

    This will use the get_parent_map RPC rather than plain readvs, and also
    uses an RPC for autopacking.
    """

    @staticmethod
    def is_compatible(source, target):
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        if isinstance(source._format, RepositoryFormatPack):
            if isinstance(target, remote.RemoteRepository):
                target._format._ensure_real()
                if isinstance(target._format._custom_format,
                              RepositoryFormatPack):
                    if InterRepository._same_model(source, target):
                        return True
        return False

    def _autopack(self):
        self.target.autopack()

    def fetch(self, revision_id=None, pb=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if self.target._client._medium._is_remote_before((1, 13)):
            # The server won't support the insert_stream RPC, so just use
            # regular InterPackRepo logic. This avoids a bug that causes many
            # round-trips for small append calls.
            return InterPackRepo.fetch(self, revision_id=revision_id, pb=pb,
                find_ghosts=find_ghosts, fetch_spec=fetch_spec)
        # Always fetch using the generic streaming fetch code, to allow
        # streaming fetching into remote servers.
        from bzrlib.fetch import RepoFetcher
        fetcher = RepoFetcher(self.target, self.source, revision_id,
                              pb, find_ghosts, fetch_spec=fetch_spec)

    def _get_target_pack_collection(self):
        return self.target._real_repository._pack_collection

    @classmethod
    def _get_repo_format_to_test(self):
        return None


InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
InterRepository.register_optimiser(InterWeaveRepo)
InterRepository.register_optimiser(InterKnitRepo)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterOtherToRemote)
InterRepository.register_optimiser(InterRemoteToOther)
InterRepository.register_optimiser(InterPackToRemotePack)
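

# Illustrative sketch (not part of bzrlib's API): how the optimisers
# registered above get used. InterRepository.get() asks each registered
# optimiser is_compatible(source, target) and falls back to the generic
# InterRepository when none match; fetch() then runs the optimised path.
# `source_repo` and `target_repo` are assumed to be open Repository objects.
def _example_optimised_fetch(source_repo, target_repo, revision_id=None):
    """Hypothetical helper demonstrating optimiser dispatch for a fetch."""
    inter = InterRepository.get(source_repo, target_repo)
    return inter.fetch(revision_id=revision_id)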


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def __init__(self, target_format):
        """Create a CopyConverter.

        :param target_format: The format the resulting repository should be.
        """
        self.target_format = target_format

    def convert(self, repo, pb):
        """Perform the conversion of repo, giving feedback via pb.

        :param repo: The disk object to convert.
        :param pb: a progress bar to use for progress information.
        """
        self.pb = pb
        self.count = 0
        self.total = 4
        # this is only useful with metadir layouts - separated repo content.
        # trigger an assertion if not such
        repo._format.get_format_string()
        self.repo_dir = repo.bzrdir
        self.step('Moving repository to repository.backup')
        self.repo_dir.transport.move('repository', 'repository.backup')
        backup_transport = self.repo_dir.transport.clone('repository.backup')
        repo._format.check_conversion_target(self.target_format)
        self.source_repo = repo._format.open(self.repo_dir,
            _found=True,
            _override_transport=backup_transport)
        self.step('Creating new repository')
        converted = self.target_format.initialize(self.repo_dir,
                                                  self.source_repo.is_shared())
        converted.lock_write()
        try:
            self.step('Copying content into repository.')
            self.source_repo.copy_content_into(converted)
        finally:
            converted.unlock()
        self.step('Deleting old repository content.')
        self.repo_dir.transport.delete_tree('repository.backup')
        self.pb.note('repository converted')

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)
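

# Illustrative sketch (not part of bzrlib's API): driving CopyConverter by
# hand. `repo` is assumed to be a repository in a metadir layout and
# `target_format` a repository format instance; the progress bar comes from
# the ui factory, mirroring how other code in this module obtains one.
def _example_copy_convert(repo, target_format):
    """Hypothetical helper running a CopyConverter conversion."""
    pb = ui.ui_factory.nested_progress_bar()
    try:
        CopyConverter(target_format).convert(repo, pb)
    finally:
        pb.finished()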


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
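

# Illustrative example (not part of bzrlib's API): _unescape_xml resolves the
# predefined entities via _unescape_map and numeric character references via
# unichr, so both assertions below hold. The function name is hypothetical.
def _example_unescape_xml():
    """Hypothetical demonstration of _unescape_xml."""
    assert _unescape_xml('a &amp; b &lt;c&gt;') == 'a & b <c>'
    assert _unescape_xml('&#65;') == 'A'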


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct. dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this
            versioned file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_weaves = len(self.file_ids)
        files_in_revisions = {}
        revisions_of_files = {}
        n_versions = len(self.text_index)
        if progress_bar is not None:
            progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text: record it as having no stored parents.
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys
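

# Illustrative sketch (not part of bzrlib's API): consuming the result of
# check_file_version_parents. `checker` is assumed to be a
# _VersionedFileChecker built against a locked repository; the helper name
# is hypothetical.
def _example_report_wrong_parents(checker):
    """Hypothetical helper that logs each stored/correct parent mismatch."""
    wrong_parents, dangling = checker.check_file_version_parents(
        checker.repository.texts)
    for key, (stored, correct) in wrong_parents.iteritems():
        note('%r stored parents %r but should be %r', key, stored, correct)
    return dangling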


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. More compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
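

# Illustrative example (not part of bzrlib's API): _strip_NULL_ghosts on a
# toy graph. 'rev2' names a ghost parent and 'rev1' names NULL_REVISION;
# both parents are filtered out while the real edge rev2 -> rev1 survives.
def _example_strip_ghosts():
    """Hypothetical demonstration of _strip_NULL_ghosts."""
    revision_graph = {
        'rev1': (_mod_revision.NULL_REVISION,),
        'rev2': ('rev1', 'ghost-rev'),
    }
    # Returns {'rev1': (), 'rev2': ('rev1',)}
    return _strip_NULL_ghosts(revision_graph)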


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens, and an iterable of the keys of any
            additional items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
            else:
                self.target_repo.start_write_group()
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

    def _locked_insert_stream(self, stream, src_format):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        for substream_type, substream in stream:
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'revisions':
                # This may fall back to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('Unexpected substream type %s' %
                    (substream_type,))
        try:
            missing_keys = set()
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ):
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what is
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        self.target_repo.commit_write_group()
        return [], set()

    def _extract_and_insert_inventories(self, substream, serializer):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('Mismatched revision id: %s != %s'
                    % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()
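

# Illustrative sketch (not part of bzrlib's API): the suspend/resume protocol
# around StreamSink.insert_stream. The caller loops until nothing is missing,
# re-feeding a stream for the missing keys each time. `get_stream_fn` is an
# assumed callable returning a record stream for a set of missing keys.
def _example_insert_with_resume(sink, stream, src_format, get_stream_fn):
    """Hypothetical driver for a suspending/resuming stream insertion."""
    resume_tokens, missing_keys = sink.insert_stream(stream, src_format, [])
    while missing_keys:
        filler = get_stream_fn(missing_keys)
        resume_tokens, missing_keys = sink.insert_stream(
            filler, src_format, resume_tokens)
    return resume_tokens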


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # bug #261339
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        if self._rich_root_upgrade():
            import bzrlib.fetch
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = list(graph.iter_topo_order(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories reference
                # them.
                for _ in self._generate_root_texts(revs):
                    yield _
                # NB: This currently reopens the inventory weave in source;
                # using a single stream interface instead would avoid this.
                from_weave = self.from_repository.inventories
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                yield ('inventories', from_weave.get_record_stream(
                    [(rev_id,) for rev_id in revs],
                    self.inventory_fetch_order(),
                    not self.delta_on_metadata()))
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, kind_keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            stream = vf.get_record_stream(kind_keys,
                self.to_format._fetch_order, True)
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)
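

# Illustrative sketch (not part of bzrlib's API): pairing a StreamSource with
# a StreamSink for a fetch. `search` is assumed to be a search result naming
# the revisions to transfer, as consumed by get_stream() above; the helper
# name is hypothetical.
def _example_stream_fetch(source_repo, target_repo, search):
    """Hypothetical end-to-end use of StreamSource and StreamSink."""
    source = StreamSource(source_repo, target_repo._format)
    sink = StreamSink(target_repo)
    resume_tokens, missing_keys = sink.insert_stream(
        source.get_stream(search), source_repo._format, [])
    if missing_keys:
        sink.insert_stream(
            source.get_stream_for_missing_keys(missing_keys),
            source_repo._format, resume_tokens)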