# Copyright (C) 2005 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from copy import deepcopy
from cStringIO import StringIO
from unittest import TestSuite
import xml.sax.saxutils

import bzrlib
import bzrlib.bzrdir as bzrdir
from bzrlib.decorators import needs_read_lock, needs_write_lock
import bzrlib.errors as errors
from bzrlib.errors import BzrError, InvalidRevisionId
from bzrlib.lockable_files import LockableFiles
from bzrlib.osutils import safe_unicode
from bzrlib.revision import NULL_REVISION
from bzrlib.store import copy_all
from bzrlib.store.weave import WeaveStore
from bzrlib.store.text import TextStore
from bzrlib.symbol_versioning import *
from bzrlib.trace import mutter
from bzrlib.tree import RevisionTree
from bzrlib.testament import Testament
from bzrlib.tree import EmptyTree
import bzrlib.ui
import bzrlib.xml5


class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of Stores and a Transport, which respectively
    describe the disk data format and the way of accessing the (possibly
    remote) disk.
    """

    def _all_possible_ids(self):
        """Return all the possible revisions that we could find."""
        return self.get_inventory_weave().names()

    def all_revision_ids(self):
        """Returns a list of all the revision ids in the repository.

        These are in as much topological order as the underlying store can
        present: for weaves ghosts may lead to a lack of correctness until
        the reweave updates the parents list.
        """
        result = self._all_possible_ids()
        return self._eliminate_revisions_not_present(result)

    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a list of the present revisions.
        """
        result = []
        for id in revision_ids:
            if self.has_revision(id):
                result.append(id)
        return result

    @staticmethod
    def create(a_bzrdir):
        """Construct the current default format repository in a_bzrdir."""
        return RepositoryFormat.get_default_format().initialize(a_bzrdir)

    def __init__(self, _format, a_bzrdir):
        if isinstance(_format, (RepositoryFormat4,
                                RepositoryFormat5,
                                RepositoryFormat6)):
            # legacy: use a common control files.
            self.control_files = a_bzrdir._control_files
        else:
            self.control_files = LockableFiles(a_bzrdir.get_repository_transport(None),
                                               'lock')
        dir_mode = self.control_files._dir_mode
        file_mode = self.control_files._file_mode
        self._format = _format
        self.bzrdir = a_bzrdir

        def get_weave(name, prefixed=False):
            name = safe_unicode(name)
            relpath = self.control_files._escape(name)
            weave_transport = self.control_files._transport.clone(relpath)
            ws = WeaveStore(weave_transport, prefixed=prefixed,
                            dir_mode=dir_mode, file_mode=file_mode)
            if self.control_files._transport.should_cache():
                ws.enable_cache = True
            return ws

        def get_store(name, compressed=True, prefixed=False):
            # FIXME: This approach of assuming stores are all entirely compressed
            # or entirely uncompressed is tidy, but breaks upgrade from
            # some existing branches where there's a mixture; we probably
            # still want the option to look for both.
            name = safe_unicode(name)
            relpath = self.control_files._escape(name)
            store = TextStore(self.control_files._transport.clone(relpath),
                              prefixed=prefixed, compressed=compressed,
                              dir_mode=dir_mode, file_mode=file_mode)
            #if self._transport.should_cache():
            #    cache_path = os.path.join(self.cache_root, name)
            #    os.mkdir(cache_path)
            #    store = bzrlib.store.CachedStore(store, cache_path)
            return store

        if isinstance(self._format, RepositoryFormat4):
            self.inventory_store = get_store('inventory-store')
            self.text_store = get_store('text-store')
            self.revision_store = get_store('revision-store')
        elif isinstance(self._format, RepositoryFormat5):
            self.control_weaves = get_weave('')
            self.weave_store = get_weave('weaves')
            self.revision_store = get_store('revision-store', compressed=False)
        elif isinstance(self._format, RepositoryFormat6):
            self.control_weaves = get_weave('')
            self.weave_store = get_weave('weaves', prefixed=True)
            self.revision_store = get_store('revision-store', compressed=False,
                                            prefixed=True)
        elif isinstance(self._format, RepositoryFormat7):
            self.control_weaves = get_weave('')
            self.weave_store = get_weave('weaves', prefixed=True)
            self.revision_store = get_store('revision-store', compressed=False,
                                            prefixed=True)
        self.revision_store.register_suffix('sig')

    def lock_write(self):
        self.control_files.lock_write()

    def lock_read(self):
        self.control_files.lock_read()

    def is_locked(self):
        return self.control_files.is_locked()

    def missing_revision_ids(self, other, revision_id=None):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).missing_revision_ids(revision_id)

    @staticmethod
    def open(base):
        """Open the repository rooted at base.

        For instance, if the repository is at URL/.bzr/repository,
        Repository.open(URL) -> a Repository instance.
        """
        control = bzrdir.BzrDir.open(base)
        return control.open_repository()
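
    # Example (illustrative sketch only; the path below is a placeholder and
    # not part of this module). A repository is usually reached through a
    # Branch or a BzrDir, but it can also be opened directly:
    #
    #   repo = Repository.open('/path/to/branch')
    #   repo.lock_read()
    #   try:
    #       revision_ids = repo.all_revision_ids()
    #   finally:
    #       repo.unlock()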

    def copy_content_into(self, destination, revision_id=None, basis=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.
        """
        return InterRepository.get(self, destination).copy_content(revision_id, basis)

    def fetch(self, source, revision_id=None, pb=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None all content is copied.
        """
        return InterRepository.get(source, self).fetch(revision_id=revision_id,
                                                       pb=pb)

    def unlock(self):
        self.control_files.unlock()

    def clone(self, a_bzrdir, revision_id=None, basis=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.
        """
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            result = a_bzrdir.create_repository()
        # FIXME RBC 20060209 split out the repository type to avoid this check ?
        elif isinstance(a_bzrdir._format,
                        (bzrdir.BzrDirFormat4,
                         bzrdir.BzrDirFormat5,
                         bzrdir.BzrDirFormat6)):
            result = a_bzrdir.open_repository()
        else:
            result = self._format.initialize(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(result, revision_id, basis)
        return result

    def has_revision(self, revision_id):
        """True if this branch has a copy of the revision.

        This does not necessarily imply the revision is merged
        or on the mainline."""
        return (revision_id is None
                or self.revision_store.has_id(revision_id))

    def get_revision_xml_file(self, revision_id):
        """Return XML file object for revision object."""
        if not revision_id or not isinstance(revision_id, basestring):
            raise InvalidRevisionId(revision_id=revision_id, branch=self)
        try:
            return self.revision_store.get(revision_id)
        except (IndexError, KeyError):
            raise bzrlib.errors.NoSuchRevision(self, revision_id)

    def get_revision_xml(self, revision_id):
        return self.get_revision_xml_file(revision_id).read()

    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        xml_file = self.get_revision_xml_file(revision_id)

        try:
            r = bzrlib.xml5.serializer_v5.read_revision(xml_file)
        except SyntaxError, e:
            raise bzrlib.errors.BzrError('failed to unpack revision_xml',
                                         [revision_id, str(e)])
        assert r.revision_id == revision_id
        return r

    def get_revision_sha1(self, revision_id):
        """Hash the stored value of a revision, and return it."""
        # In the future, revision entries will be signed. At that
        # point, it is probably best *not* to include the signature
        # in the revision hash. Because that lets you re-sign
        # the revision, (add signatures/remove signatures) and still
        # have all hash pointers stay consistent.
        # But for now, just hash the contents.
        return bzrlib.osutils.sha_file(self.get_revision_xml_file(revision_id))

    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        self.revision_store.add(StringIO(gpg_strategy.sign(plaintext)),
                                revision_id, "sig")

    def fileid_involved_between_revs(self, from_revid, to_revid):
        """Find file_id(s) which are involved in the changes between revisions.

        This determines the set of revisions which are involved, and then
        finds all file ids affected by those revisions.
        """
        # TODO: jam 20060119 This code assumes that w.inclusions will
        #       always be correct. But because of the presence of ghosts
        #       it is possible to be wrong.
        #       One specific example from Robert Collins:
        #       Two branches, with revisions ABC, and AD
        #       C is a ghost merge of D.
        #       Inclusions doesn't recognize D as an ancestor.
        #       If D is ever merged in the future, the weave
        #       won't be fixed, because AD never saw revision C
        #       to cause a conflict which would force a reweave.
        w = self.get_inventory_weave()
        from_set = set(w.inclusions([w.lookup(from_revid)]))
        to_set = set(w.inclusions([w.lookup(to_revid)]))
        included = to_set.difference(from_set)
        changed = map(w.idx_to_name, included)
        return self._fileid_involved_by_set(changed)

    def fileid_involved(self, last_revid=None):
        """Find all file_ids modified in the ancestry of last_revid.

        :param last_revid: If None, last_revision() will be used.
        """
        w = self.get_inventory_weave()
        if not last_revid:
            changed = set(w._names)
        else:
            included = w.inclusions([w.lookup(last_revid)])
            changed = map(w.idx_to_name, included)
        return self._fileid_involved_by_set(changed)

    def fileid_involved_by_set(self, changes):
        """Find all file_ids modified by the set of revisions passed in.

        :param changes: A set() of revision ids
        """
        # TODO: jam 20060119 This line does *nothing*, remove it.
        #       or better yet, change _fileid_involved_by_set so
        #       that it takes the inventory weave, rather than
        #       pulling it out by itself.
        return self._fileid_involved_by_set(changes)

    def _fileid_involved_by_set(self, changes):
        """Find the set of file-ids affected by the set of revisions.

        :param changes: A set() of revision ids.
        :return: A set() of file ids.

        This peeks at the Weave, interpreting each line, looking to
        see if it mentions one of the revisions. And if so, includes
        the file id mentioned.
        This expects both the Weave format, and the serialization
        to have a single line per file/directory, and to have
        fileid="" and revision="" on that line.
        """
        assert isinstance(self._format, (RepositoryFormat5,
                                         RepositoryFormat6,
                                         RepositoryFormat7)), \
            "fileid_involved only supported for branches which store inventory as unnested xml"

        w = self.get_inventory_weave()
        file_ids = set()
        for line in w._weave:
            # it is ugly, but it is due to the weave structure
            if not isinstance(line, basestring): continue

            start = line.find('file_id="')+9
            if start < 9: continue
            end = line.find('"', start)
            file_id = xml.sax.saxutils.unescape(line[start:end])

            # check if file_id is already present
            if file_id in file_ids: continue

            start = line.find('revision="')+10
            if start < 10: continue
            end = line.find('"', start)
            revision_id = xml.sax.saxutils.unescape(line[start:end])

            if revision_id in changes:
                file_ids.add(file_id)
        return file_ids

    def get_inventory_weave(self):
        return self.control_weaves.get_weave('inventory',
                                             self.get_transaction())

    def get_inventory(self, revision_id):
        """Get Inventory object by hash."""
        xml = self.get_inventory_xml(revision_id)
        return bzrlib.xml5.serializer_v5.read_inventory_from_string(xml)

    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        try:
            assert isinstance(revision_id, basestring), type(revision_id)
            iw = self.get_inventory_weave()
            return iw.get_text(iw.lookup(revision_id))
        except IndexError:
            raise bzrlib.errors.HistoryMissing(self, 'inventory', revision_id)

    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry."""
        return self.get_revision(revision_id).inventory_sha1

    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        # FIXME format 4-6 cannot be shared, this is technically faulty.
        return self.control_files._transport.has('shared-storage')

    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be None for the null revision, in which case
        an `EmptyTree` is returned."""
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id is None or revision_id == NULL_REVISION:
            return EmptyTree()
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def get_ancestry(self, revision_id):
        """Return a list of revision-ids integrated by a revision.

        This is topologically sorted.
        """
        if revision_id is None:
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        w = self.get_inventory_weave()
        return [None] + map(w.idx_to_name,
                            w.inclusions([w.lookup(revision_id)]))
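
    # Example (illustrative only; 'some_rev_id' is a placeholder): get_ancestry
    # returns a topologically sorted list whose first element is always None,
    # so callers typically slice it off before use:
    #
    #   ancestry = repo.get_ancestry(some_rev_id)
    #   assert ancestry[0] is None
    #   real_ancestors = ancestry[1:]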

    def print_file(self, file, revision_id):
        """Print `file` to stdout.

        FIXME RBC 20060125 as John Meinel points out this is a bad api
        - it writes to stdout, it assumes that that is valid etc. Fix
        by creating a new more flexible convenience function.
        """
        tree = self.revision_tree(revision_id)
        # use inventory as it was in that revision
        file_id = tree.inventory.path2id(file)
        if not file_id:
            try:
                revno = self.revision_id_to_revno(revision_id)
            except errors.NoSuchRevision:
                # TODO: This should not be BzrError,
                # but NoSuchFile doesn't fit either
                raise BzrError('%r is not present in revision %s'
                               % (file, revision_id))
            else:
                raise BzrError('%r is not present in revision %s'
                               % (file, revno))
        tree.print_file(file_id)

    def get_transaction(self):
        return self.control_files.get_transaction()

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        # FIXME: split out into a new class/strategy ?
        if isinstance(self._format, (RepositoryFormat4,
                                     RepositoryFormat5,
                                     RepositoryFormat6)):
            raise NotImplementedError(self.set_make_working_trees)
        if new_value:
            try:
                self.control_files._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self.control_files.put_utf8('no-working-trees', '')

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        # FIXME: split out into a new class/strategy ?
        if isinstance(self._format, (RepositoryFormat4,
                                     RepositoryFormat5,
                                     RepositoryFormat6)):
            raise NotImplementedError(self.make_working_trees)
        return not self.control_files._transport.has('no-working-trees')

    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)


class RepositoryFormat(object):
    """A repository format.

    Formats provide three things:
     * An initialization routine to construct repository data on disk.
     * a format string which is used when the BzrDir supports versioned
       children.
     * an open routine which returns a Repository instance.

    Formats are placed in a dict by their format string for reference
    during opening. These should be subclasses of RepositoryFormat
    for consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every system load.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterisation.
    """

    _default_format = None
    """The default format used for new repositories."""

    _formats = {}
    """The known formats."""

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir."""
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return klass._formats[format_string]
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format_string)

    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        return klass._default_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.

        This may raise UninitializableFormat if shared repositories are not
        compatible with a_bzrdir.
        """

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError
        return Repository(_format=self, a_bzrdir=a_bzrdir)

    @classmethod
    def register_format(klass, format):
        klass._formats[format.get_format_string()] = format

    @classmethod
    def set_default_format(klass, format):
        klass._default_format = format

    @classmethod
    def unregister_format(klass, format):
        assert klass._formats[format.get_format_string()] is format
        del klass._formats[format.get_format_string()]
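
# Example (illustrative only; 'MyRepositoryFormat' is a hypothetical name, not
# part of this module): the registry above is what find_format() consults when
# opening a repository, so a new format is made discoverable like this:
#
#   class MyRepositoryFormat(RepositoryFormat):
#       def get_format_string(self):
#           return "My repository format v1"
#
#   RepositoryFormat.register_format(MyRepositoryFormat())
#   # find_format(a_bzrdir) can now return it when the repository's
#   # 'format' file contains that string.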


class PreSplitOutRepositoryFormat(RepositoryFormat):
    """Base class for the pre split out repository formats."""

    def initialize(self, a_bzrdir, shared=False, _internal=False):
        """Create a weave repository.

        TODO: when creating split out bzr branch formats, move this to a common
        base for Format5, Format6. or something like that.
        """
        from bzrlib.weavefile import write_weave_v5
        from bzrlib.weave import Weave

        if shared:
            raise errors.IncompatibleFormat(self, a_bzrdir._format)

        if not _internal:
            # always initialized when the bzrdir is.
            return Repository(_format=self, a_bzrdir=a_bzrdir)

        # Create an empty weave
        sio = StringIO()
        bzrlib.weavefile.write_weave_v5(Weave(), sio)
        empty_weave = sio.getvalue()

        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['revision-store', 'weaves']
        lock_file = 'branch-lock'
        files = [('inventory.weave', StringIO(empty_weave)),
                 ]

        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        control_files = LockableFiles(a_bzrdir.transport, 'branch-lock')
        control_files.lock_write()
        control_files._transport.mkdir_multi(dirs,
                                             mode=control_files._dir_mode)
        try:
            for file, content in files:
                control_files.put(file, content)
        finally:
            control_files.unlock()
        return Repository(_format=self, a_bzrdir=a_bzrdir)


class RepositoryFormat4(PreSplitOutRepositoryFormat):
    """Bzr repository format 4.

    This repository format has:
     - TextStores for texts, inventories, revisions.

    This format is deprecated: it indexes texts using a text id which is
    removed in format 5; initialization and write support for this format
    has been removed.
    """

    def __init__(self):
        super(RepositoryFormat4, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat4()

    def initialize(self, url, shared=False, _internal=False):
        """Format 4 branches cannot be created."""
        raise errors.UninitializableFormat(self)

    def is_supported(self):
        """Format 4 is not supported.

        It is not supported because the model changed from 4 to 5 and the
        conversion logic is expensive - so doing it on the fly was not
        feasible.
        """
        return False


class RepositoryFormat5(PreSplitOutRepositoryFormat):
    """Bzr control format 5.

    This repository format has:
     - weaves for file texts and inventory
     - TextStores for revisions and signatures.
    """

    def __init__(self):
        super(RepositoryFormat5, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat5()


class RepositoryFormat6(PreSplitOutRepositoryFormat):
    """Bzr control format 6.

    This repository format has:
     - weaves for file texts and inventory
     - hash subdirectory based stores.
     - TextStores for revisions and signatures.
    """

    def __init__(self):
        super(RepositoryFormat6, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirFormat6()


class RepositoryFormat7(RepositoryFormat):
    """Bzr repository format 7.

    This repository format has:
     - weaves for file texts and inventory
     - hash subdirectory based stores.
     - TextStores for revisions and signatures.
     - a format marker of its own
     - an optional 'shared-storage' flag
    """

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar-NG Repository format 7"

    def initialize(self, a_bzrdir, shared=False):
        """Create a weave repository.

        :param shared: If true the repository will be initialized as a shared
                       repository.
        """
        from bzrlib.weavefile import write_weave_v5
        from bzrlib.weave import Weave

        # Create an empty weave
        sio = StringIO()
        bzrlib.weavefile.write_weave_v5(Weave(), sio)
        empty_weave = sio.getvalue()

        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['revision-store', 'weaves']
        files = [('inventory.weave', StringIO(empty_weave)),
                 ]
        utf8_files = [('format', self.get_format_string())]

        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        lock_file = 'lock'
        repository_transport = a_bzrdir.get_repository_transport(self)
        repository_transport.put(lock_file, StringIO()) # TODO get the file mode from the bzrdir lock files., mode=file_mode)
        control_files = LockableFiles(repository_transport, 'lock')
        control_files.lock_write()
        control_files._transport.mkdir_multi(dirs,
                                             mode=control_files._dir_mode)
        try:
            for file, content in files:
                control_files.put(file, content)
            for file, content in utf8_files:
                control_files.put_utf8(file, content)
            if shared:
                control_files.put_utf8('shared-storage', '')
        finally:
            control_files.unlock()
        return Repository(_format=self, a_bzrdir=a_bzrdir)

    def __init__(self):
        super(RepositoryFormat7, self).__init__()
        self._matchingbzrdir = bzrdir.BzrDirMetaFormat1()


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
_default_format = RepositoryFormat7()
RepositoryFormat.register_format(_default_format)
RepositoryFormat.set_default_format(_default_format)
_legacy_formats = [RepositoryFormat4(),
                   RepositoryFormat5(),
                   RepositoryFormat6()]


class InterRepository(object):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """
    # XXX: FIXME: FUTURE: robertc
    # testing of these probably requires a factory in optimiser type, and
    # then a test adapter to test each type thoroughly.

    _optimisers = set()
    """The available optimised InterRepository types."""

    def __init__(self, source, target):
        """Construct a default InterRepository instance. Please use 'get'.

        Only subclasses of InterRepository should call
        InterRepository.__init__ - clients should call InterRepository.get
        instead which will create an optimised InterRepository if possible.
        """
        self.source = source
        self.target = target

    def copy_content(self, revision_id=None, basis=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        :param basis: Copy the needed data preferentially from basis.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # grab the basis available data
        if basis is not None:
            self.target.fetch(basis, revision_id=revision_id)
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    def _double_lock(self, lock_source, lock_target):
        """Take out two locks, rolling back the first if the second throws."""
        lock_source()
        try:
            lock_target()
        except Exception:
            # we want to ensure that we don't leave source locked by mistake.
            # and any error on target should not confuse source.
            self.source.unlock()
            raise

    def fetch(self, revision_id=None, pb=None):
        """Fetch the content required to construct revision_id.

        The content is copied from source to target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        Returns the copied revision count and the failed revisions in a tuple:
        (copied, failures).
        """
        from bzrlib.fetch import RepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        pb=pb)
        return f.count_copied, f.failed_revisions

    @classmethod
    def get(klass, repository_source, repository_target):
        """Retrieve an InterRepository worker object for these repositories.

        :param repository_source: the repository to be the 'source' member of
                                  the InterRepository instance.
        :param repository_target: the repository to be the 'target' member of
                                  the InterRepository instance.
        If an optimised InterRepository worker exists it will be used otherwise
        a default InterRepository instance will be created.
        """
        for provider in klass._optimisers:
            if provider.is_compatible(repository_source, repository_target):
                return provider(repository_source, repository_target)
        return InterRepository(repository_source, repository_target)
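
    # Example (illustrative sketch; 'repo_a' and 'repo_b' are placeholders):
    # Repository.fetch() and missing_revision_ids() route through this factory,
    # but it can also be used directly:
    #
    #   inter = InterRepository.get(repo_a, repo_b)
    #   missing = inter.missing_revision_ids()
    #   inter.fetch()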

    def lock_read(self):
        """Take out a logical read lock.

        This will lock the source branch and the target branch. The source gets
        a read lock and the target a read lock.
        """
        self._double_lock(self.source.lock_read, self.target.lock_read)

    def lock_write(self):
        """Take out a logical write lock.

        This will lock the source branch and the target branch. The source gets
        a read lock and the target a write lock.
        """
        self._double_lock(self.source.lock_read, self.target.lock_write)

    def missing_revision_ids(self, revision_id=None):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
                            revision_id.
        """
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids.pop(0) == None
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        # this may look like a no-op: it's not. It preserves the ordering
        # source_ids had while only returning the members from source_ids
        # that we've decided we need.
        return [rev_id for rev_id in source_ids if rev_id in result_set]

    @classmethod
    def register_optimiser(klass, optimiser):
        """Register an InterRepository optimiser."""
        klass._optimisers.add(optimiser)

    def unlock(self):
        """Release the locks on source and target."""
        try:
            self.target.unlock()
        finally:
            self.source.unlock()

    @classmethod
    def unregister_optimiser(klass, optimiser):
        """Unregister an InterRepository optimiser."""
        klass._optimisers.remove(optimiser)


class InterWeaveRepo(InterRepository):
    """Optimised code paths between Weave based repositories."""

    _matching_repo_format = _default_format
    """Repository format for testing with."""

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    def copy_content(self, revision_id=None, basis=None):
        """See InterRepository.copy_content()."""
        # weave specific optimised path:
        if basis is not None:
            # copy the basis in, then fetch remaining data.
            basis.copy_content_into(self.target, revision_id)
            # the basis copy_content_into could misset this.
            try:
                self.target.set_make_working_trees(self.source.make_working_trees())
            except NotImplementedError:
                pass
            self.target.fetch(self.source, revision_id=revision_id)
        else:
            try:
                self.target.set_make_working_trees(self.source.make_working_trees())
            except NotImplementedError:
                pass
            if self.source.control_files._transport.listable():
                pb = bzrlib.ui.ui_factory.progress_bar()
                copy_all(self.source.weave_store,
                         self.target.weave_store, pb=pb)
                pb.update('copying inventory', 0, 1)
                self.target.control_weaves.copy_multi(
                    self.source.control_weaves, ['inventory'])
                copy_all(self.source.revision_store,
                         self.target.revision_store, pb=pb)
            else:
                self.target.fetch(self.source, revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import RepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        pb=pb)
        return f.count_copied, f.failed_revisions

    def missing_revision_ids(self, revision_id=None):
        """See InterRepository.missing_revision_ids()."""
        # we want all revisions to satisfy revision_id in source.
        # but we don't want to stat every file here and there.
        # we want then, all revisions other needs to satisfy revision_id
        # checked, but not those that we have locally.
        # so the first thing is to get a subset of the revisions to
        # satisfy revision_id in source, and then eliminate those that
        # we do already have.
        # this is slow on high latency connection to self, but as this
        # disk format scales terribly for push anyway due to rewriting
        # inventory.weave, this is considered acceptable.
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            assert source_ids.pop(0) == None
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        required_topo_revisions = [rev_id for rev_id in source_ids if rev_id in required_revisions]
        if revision_id is not None:
            # if we used get_ancestry to determine source_ids then we are
            # assured all revisions referenced are present as they are
            # installed in topological order. And the tip revision was
            # validated by get_ancestry.
            return required_topo_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to
            # validate that against the revision records.
            return self.source._eliminate_revisions_not_present(required_topo_revisions)


InterRepository.register_optimiser(InterWeaveRepo)


class RepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple repository formats at once.

    This is done by copying the test once for each transport and injecting
    the transport_server, transport_readonly_server, bzrdir_format and
    repository_format classes into each copy. Each copy is also given a new id()
    to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        result = TestSuite()
        for repository_format, bzrdir_format in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.bzrdir_format = bzrdir_format
            new_test.repository_format = repository_format
            def make_new_test_id():
                new_id = "%s(%s)" % (new_test.id(), repository_format.__class__.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        return result
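
    # Example (illustrative sketch; 'server_factory' and 'some_test_case' are
    # placeholders): a test loader would typically pair formats with their
    # matching bzrdir formats and feed them to this adapter, e.g.
    #
    #   formats = [(RepositoryFormat7(), bzrdir.BzrDirMetaFormat1())]
    #   adapter = RepositoryTestProviderAdapter(server_factory, None, formats)
    #   suite = adapter.adapt(some_test_case)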


class InterRepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple inter repository formats.

    This is done by copying the test once for each interrepo provider and injecting
    the transport_server, transport_readonly_server, repository_format and
    repository_to_format classes into each copy.
    Each copy is also given a new id() to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        result = TestSuite()
        for interrepo_class, repository_format, repository_format_to in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.interrepo_class = interrepo_class
            new_test.repository_format = repository_format
            new_test.repository_format_to = repository_format_to
            def make_new_test_id():
                new_id = "%s(%s)" % (new_test.id(), interrepo_class.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        return result

    @staticmethod
    def default_test_list():
        """Generate the default list of interrepo permutations to test."""
        result = []
        # test the default InterRepository between format 6 and the current
        # default format.
        # XXX: robertc 20060220 reinstate this when there are two supported
        # formats which do not have an optimal code path between them.
        #result.append((InterRepository, RepositoryFormat6(),
        #              RepositoryFormat.get_default_format()))
        for optimiser in InterRepository._optimisers:
            result.append((optimiser,
                           optimiser._matching_repo_format,
                           optimiser._matching_repo_format
                           ))
        # if there are specific combinations we want to use, we can add them
        # here.
        return result
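
    # Example (illustrative only; 'server_factory' is a placeholder): the
    # default permutations pair each registered optimiser with its matching
    # repository format and can be fed straight to the adapter above:
    #
    #   permutations = InterRepositoryTestProviderAdapter.default_test_list()
    #   adapter = InterRepositoryTestProviderAdapter(server_factory, None,
    #                                                permutations)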