# Copyright (C) 2005, 2006, 2007, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from cStringIO import StringIO

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import time

from bzrlib import (
    bzrdir,
    check,
    errors,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    lru_cache,
    osutils,
    revision as _mod_revision,
    tsort,
    ui,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.store.text import TextStore
from bzrlib.testament import Testament
from bzrlib.util import bencode
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import Inventory, InventoryDirectory, ROOT_ID
from bzrlib.symbol_versioning import (
        deprecated_method,
        one_two,
        )
from bzrlib.trace import mutter, mutter_callsite, note, warning


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id
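
    # Illustrative sketch (not part of this module's API): the expected
    # lifecycle of a CommitBuilder as driven by higher-level commit code.
    # The names 'branch', 'entries' and the per-entry loop are assumptions
    # made for illustration only.
    #
    #   builder = repository.get_commit_builder(branch, parents, config)
    #   for path, ie in entries:
    #       builder.record_entry_contents(ie, parent_invs, path, tree,
    #           content_summary)
    #   builder.finish_inventory()
    #   rev_id = builder.commit('commit message')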

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished."""
        if self.new_inventory.root is None:
            raise AssertionError('Root entry should be supplied to'
                ' record_entry_contents, as of bzr 0.10.')
        self.new_inventory.revision_id = self._new_revision_id
        self.inv_sha1 = self.repository.add_inventory(
            self._new_revision_id,
            self.new_inventory,
            self.parents
            )

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            return (None, path, ie.file_id, ie)
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            return (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
        else:
            # common, unaltered
            return None

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded). change_delta is
            an inventory_delta change for this entry against the basis tree of
            the commit, or None if no change occurred against the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs, and
                # this masks when a change may have occurred against the basis,
                # so calculate if one happened.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                return delta, False
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If its not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r)." % (ie, self._new_revision_id))
                return None, False
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more the one head in the
        # ancestors, we write a new node.
        store = len(heads) != 1
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False
                else:
                    # Either there is only a hash change(no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            lines = tree.get_file(ie.file_id, path).readlines()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False
            ie.symlink_target = current_link_target
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        return self._get_delta(ie, basis_inv, path), True
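
    # Illustrative note (example values are hypothetical): the change_delta
    # returned above is the 4-tuple built by _get_delta,
    # (old_path, new_path, file_id, new_entry); old_path is None for an
    # added entry, e.g. (None, 'doc.txt', 'doc-file-id', ie) for an add.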

    def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha):
        # Note: as we read the content directly from the tree, we know it's not
        # been turned into unicode or badly split - but a broken tree
        # implementation could give us bad output from readlines() so this is
        # not a guarantee of safety. What would be better is always checking
        # the content during test suite execution. RBC 20070912
        parent_keys = tuple((file_id, parent) for parent in parents)
        return self.repository.texts.add_lines(
            (file_id, self._new_revision_id), parent_keys, new_lines,
            nostore_sha=nostore_sha, random_id=self.random_revid,
            check_content=False)[0:2]


class RootCommitBuilder(CommitBuilder):
    """This commitbuilder actually records the root id"""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """


######################################################################
# Repositories

class Repository(object):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history. It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories and texts attributes) and a Transport,
    which respectively provide byte storage and a means to access the
    (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). We use this interface because it allows low
    friction with the underlying code that implements disk indices, network
    encoding and other parts of bzrlib.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures. The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories. The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """
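
    # Illustrative examples of the key-tuple addressing described above
    # (the ids are hypothetical): revision, inventory and signature keys
    # are 1-tuples, text keys are 2-tuples.
    #
    #   repo.revisions.get_parent_map([('rev-1',)])
    #   repo.texts.get_record_stream([('a-file-id', 'rev-1')],
    #       'unordered', True)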

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = CommitBuilder
    # The search regex used by xml based repositories to determine what things
    # were changed in a single commit.
    _file_ids_altered_regex = lazy_regex.lazy_compile(
        r'file_id="(?P<file_id>[^"]+)"'
        r'.* revision="(?P<revision_id>[^"]+)"'
        )

    def abort_write_group(self):
        """Abort the contents accrued within the current write group.

        :seealso: start_write_group.
        """
        if self._write_group is not self.get_transaction():
            # has an unlock or relock occurred ?
            raise errors.BzrError('mismatched lock context and write group.')
        self._abort_write_group()
        self._write_group = None

    def _abort_write_group(self):
        """Template method for per-repository write group cleanup.

        This is called during abort before the write group is considered to be
        finished and should cleanup any internal state accrued during the write
        group. There is no requirement that data handed to the repository be
        *not* made available - this is not a rollback - but neither should any
        attempt be made to ensure that data added is fully committed. Abort is
        invoked when an error has occurred so further disk or network operations
        may not be possible or may error and if possible should not be
        attempted.
        """

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        self._check_fallback_repository(repository)
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)

    def _check_fallback_repository(self, repository):
        """Check that this repository can fallback to repository safely.

        Raise an error if not.

        :param repository: A repository to fallback to.
        """
        return InterRepository._assert_same_model(self, repository)

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise AssertionError()
        inv_lines = self._serialise_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        return self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]

    def add_revision(self, revision_id, rev, inv=None, config=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
                    up in the inventory store.
        """

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a Repository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The BzrDir of the repository.
        """
        super(Repository, self).__init__()
        self._format = _format
        # the following are part of the public API for Repository:
        self.bzrdir = a_bzrdir
        self.control_files = control_files
        self._transport = control_files._transport
        self.base = self._transport.base
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # not right yet - should be more semantically clear ?
        #
        # TODO: make sure to construct the right store classes, etc, depending
        # on whether escaping is required.
        self._warn_if_deprecated()
        self._write_group = None
        # Additional places to query for data.
        self._fallback_repositories = []
        # What order should fetch operations request streams in?
        # The default is unsorted as that is the cheapest for an origin to
        # provide.
        self._fetch_order = 'unsorted'
        # Does this repository use deltas that can be fetched as-deltas ?
        # (E.g. knits, where the knit deltas can be transplanted intact.)
        # We default to False, which will ensure that enough data to get
        # a full text out of any fetch stream will be grabbed.
        self._fetch_uses_deltas = False
        # Should fetch trigger a reconcile after the fetch? Only needed for
        # some repository formats that can suffer internal inconsistencies.
        self._fetch_reconcile = False

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__,
                           self.base)

    def has_same_location(self, other):
        """Returns a boolean indicating if this repository is at the same
        location as another repository.

        This might return False even when two repository objects are accessing
        the same physical repository via different URLs.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self._transport.base == other._transport.base)

    def is_in_write_group(self):
        """Return True if there is an open write group.

        :seealso: start_write_group.
        """
        return self._write_group is not None

    def is_locked(self):
        return self.control_files.is_locked()

    def is_write_locked(self):
        """Return True if this object is write locked."""
        return self.is_locked() and self.control_files._lock_mode == 'w'

    def lock_write(self, token=None):
        """Lock this repository for writing.

        This causes caching within the repository object to start accumulating
        data during reads, and allows a 'write_group' to be obtained. Write
        groups must be used for actual data insertion.

        :param token: if this is already locked, then lock_write will fail
            unless the token matches the existing lock.
        :returns: a token if this instance supports tokens, otherwise None.
        :raises TokenLockingNotSupported: when a token is given but this
            instance doesn't support using token locks.
        :raises MismatchedToken: if the specified token doesn't match the token
            of the existing lock.
        :seealso: start_write_group.

        A token should be passed in if you know that you have locked the object
        some other way, and need to synchronise this object's state with that
        fact.

        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
        """
        result = self.control_files.lock_write(token=token)
        for repo in self._fallback_repositories:
            # Writes don't affect fallback repos
            repo.lock_read()
        return result
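
    # Illustrative sketch (an assumed calling pattern, not an API addition):
    # a token returned by lock_write() can be handed to a later lock_write()
    # call to synchronise with a lock taken out some other way.
    #
    #   token = repo.lock_write()
    #   try:
    #       repo.lock_write(token=token)  # re-lock: the token must match
    #       try:
    #           pass  # ... write-locked work ...
    #       finally:
    #           repo.unlock()
    #   finally:
    #       repo.unlock()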

    def lock_read(self):
        self.control_files.lock_read()
        for repo in self._fallback_repositories:
            repo.lock_read()

    def get_physical_lock_status(self):
        return self.control_files.get_physical_lock_status()

    def leave_lock_in_place(self):
        """Tell this repository not to release the physical lock when this
        object is unlocked.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.leave_in_place()

    def dont_leave_lock_in_place(self):
        """Tell this repository to release the physical lock when this
        object is unlocked, even if it didn't originally acquire it.

        If lock_write doesn't return a token, then this method is not supported.
        """
        self.control_files.dont_leave_in_place()

    def gather_stats(self, revid=None, committers=None):
        """Gather statistics from a revision id.

        :param revid: The revision id to gather statistics from, if None, then
            no revision specific statistics are gathered.
        :param committers: Optional parameter controlling whether to grab
            a count of committers from the revision specific statistics.
        :return: A dictionary of statistics. Currently this contains:
            committers: The number of committers if requested.
            firstrev: A tuple with timestamp, timezone for the penultimate left
                most ancestor of revid, if revid is not the NULL_REVISION.
            latestrev: A tuple with timestamp, timezone for revid, if revid is
                not the NULL_REVISION.
            revisions: The total revision count in the repository.
            size: An estimated disk size of the repository in bytes.
        """
        result = {}
        if revid and committers:
            result['committers'] = 0
        if revid and revid != _mod_revision.NULL_REVISION:
            if committers:
                all_committers = set()
            revisions = self.get_ancestry(revid)
            # pop the leading None
            revisions.pop(0)
            first_revision = None
            if not committers:
                # ignore the revisions in the middle - just grab first and last
                revisions = revisions[0], revisions[-1]
            for revision in self.get_revisions(revisions):
                if not first_revision:
                    first_revision = revision
                if committers:
                    all_committers.add(revision.committer)
            last_revision = revision
            if committers:
                result['committers'] = len(all_committers)
            result['firstrev'] = (first_revision.timestamp,
                first_revision.timezone)
            result['latestrev'] = (last_revision.timestamp,
                last_revision.timezone)

        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.bzrdir.root_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
        return result
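
    # Illustrative result shape (all values hypothetical): for a repository
    # with two revisions, gather_stats('rev-2') might return something like
    #   {'revisions': 2,
    #    'firstrev': (1216236000.0, 3600),
    #    'latestrev': (1216322400.0, 3600)}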

    def find_branches(self, using=False):
        """Find branches underneath this repository.

        This will include branches inside other branches.

        :param using: If True, list only branches using this repository.
        """
        if using and not self.is_shared():
            try:
                return [self.bzrdir.open_branch()]
            except errors.NotBranchError:
                return []
        class Evaluator(object):

            def __init__(self):
                self.first_call = True

            def __call__(self, bzrdir):
                # On the first call, the parameter is always the bzrdir
                # containing the current repo.
                if not self.first_call:
                    try:
                        repository = bzrdir.open_repository()
                    except errors.NoRepositoryPresent:
                        pass
                    else:
                        return False, (None, repository)
                self.first_call = False
                try:
                    value = (bzrdir.open_branch(), None)
                except errors.NotBranchError:
                    value = (None, None)
                return True, value

        branches = []
        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
                self.bzrdir.root_transport, evaluate=Evaluator()):
            if branch is not None:
                branches.append(branch)
            if not using and repository is not None:
                branches.extend(repository.find_branches())
        return branches

    def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        return InterRepository.get(other, self).search_missing_revision_ids(
            revision_id, find_ghosts)

    @deprecated_method(one_two)
    def missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
        """Return the revision ids that other has that this does not.

        These are returned in topological order.

        revision_id: only return revision ids included by revision_id.
        """
        keys = self.search_missing_revision_ids(
            other, revision_id, find_ghosts).get_keys()
        other.lock_read()
        try:
            parents = other.get_graph().get_parent_map(keys)
        finally:
            other.unlock()
        return tsort.topo_sort(parents)

    def get_commit_builder(self, branch, parents, config, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        result = self._commit_builder_class(self, parents, config,
            timestamp, timezone, committer, revprops, revision_id)
        self.start_write_group()
        return result

    def unlock(self):
        if (self.control_files._lock_count == 1 and
            self.control_files._lock_mode == 'w'):
            if self._write_group is not None:
                self.abort_write_group()
                self.control_files.unlock()
                raise errors.BzrError(
                    'Must end write groups before releasing write locks.')
        self.control_files.unlock()
        for repo in self._fallback_repositories:
            repo.unlock()

    @needs_read_lock
    def clone(self, a_bzrdir, revision_id=None):
        """Clone this repository into a_bzrdir using the current format.

        Currently no check is made that the format of this repository and
        the bzrdir format are compatible. FIXME RBC 20060201.

        :return: The newly created destination repository.
        """
        # TODO: deprecate after 0.16; cloning this with all its settings is
        # probably not very useful -- mbp 20070423
        dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())
        self.copy_content_into(dest_repo, revision_id)
        return dest_repo

    def start_write_group(self):
        """Start a write group in the repository.

        Write groups are used by repositories which do not have a 1:1 mapping
        between file ids and backend store to manage the insertion of data from
        both fetch and commit operations.

        A write lock is required around the start_write_group/commit_write_group
        for the support of lock-requiring repository formats.

        One can only insert data into a repository inside a write group.

        :return: None.
        """
        if not self.is_write_locked():
            raise errors.NotWriteLocked(self)
        if self._write_group:
            raise errors.BzrError('already in a write group')
        self._start_write_group()
        # so we can detect unlock/relock - the write group is now entered.
        self._write_group = self.get_transaction()
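
    # Illustrative sketch of the write group protocol described above
    # (a caller-side pattern assumed for clarity, not a prescribed API):
    #
    #   repo.lock_write()
    #   try:
    #       repo.start_write_group()
    #       try:
    #           pass  # ... insert data via the repository's write APIs ...
    #       except:
    #           repo.abort_write_group()
    #           raise
    #       else:
    #           repo.commit_write_group()
    #   finally:
    #       repo.unlock()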

    def _start_write_group(self):
        """Template method for per-repository write group startup.

        This is called before the write group is considered to be
        entered.
        """

    def sprout(self, to_bzrdir, revision_id=None):
        """Create a descendant repository for new development.

        Unlike clone, this does not copy the settings of the repository.
        """
        dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)
        dest_repo.fetch(self, revision_id=revision_id)
        return dest_repo

    def _create_sprouting_repo(self, a_bzrdir, shared):
        if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):
            # use target default format.
            dest_repo = a_bzrdir.create_repository()
        else:
            # Most control formats need the repository to be specifically
            # created, but on some old all-in-one formats it's not needed
            try:
                dest_repo = self._format.initialize(a_bzrdir, shared=shared)
            except errors.UninitializableFormat:
                dest_repo = a_bzrdir.open_repository()
        return dest_repo

    @needs_read_lock
    def has_revision(self, revision_id):
        """True if this repository has a copy of the revision."""
        return revision_id in self.has_revisions((revision_id,))

    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result

    def get_revision(self, revision_id):
        """Return the Revision object for a named revision."""
        return self.get_revisions([revision_id])[0]

    @needs_read_lock
    def get_revision_reconcile(self, revision_id):
        """'reconcile' helper routine that allows access to a revision always.

        This variant of get_revision does not cross check the weave graph
        against the revision one as get_revision does: but it should only
        be used by reconcile, or reconcile-alike commands that are correcting
        or testing the revision graph.
        """
        return self._get_revisions([revision_id])[0]

    def get_revisions(self, revision_ids):
        """Get many revisions at once."""
        return self._get_revisions(revision_ids)

    def _get_revisions(self, revision_ids):
        """Core work logic to get many revisions without sanity checks."""
        for rev_id in revision_ids:
            if not rev_id or not isinstance(rev_id, basestring):
                raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
        keys = [(key,) for key in revision_ids]
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
        revs = {}
        for record in stream:
            if record.storage_kind == 'absent':
                raise errors.NoSuchRevision(self, record.key[0])
            text = record.get_bytes_as('fulltext')
            rev = self._serializer.read_revision_from_string(text)
            revs[record.key[0]] = rev
        return [revs[revid] for revid in revision_ids]

    @needs_read_lock
    def get_revision_xml(self, revision_id):
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
        #       would have already done it.
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
        rev = self.get_revision(revision_id)
        rev_tmp = StringIO()
        # the current serializer..
        self._serializer.write_revision(rev, rev_tmp)
        rev_tmp.seek(0)
        return rev_tmp.getvalue()

    def get_deltas_for_revisions(self, revisions):
        """Produce a generator of revision deltas.

        Note that the input is a sequence of REVISIONS, not revision_ids.
        Trees will be held in memory until the generator exits.
        Each delta is relative to the revision's lefthand predecessor.
        """
        required_trees = set()
        for revision in revisions:
            required_trees.add(revision.revision_id)
            required_trees.update(revision.parent_ids[:1])
        trees = dict((t.get_revision_id(), t) for
                     t in self.revision_trees(required_trees))
        for revision in revisions:
            if not revision.parent_ids:
                old_tree = self.revision_tree(None)
            else:
                old_tree = trees[revision.parent_ids[0]]
            yield trees[revision.revision_id].changes_from(old_tree)

    @needs_read_lock
    def get_revision_delta(self, revision_id):
        """Return the delta for one revision.

        The delta is relative to the left-hand predecessor of the
        revision.
        """
        r = self.get_revision(revision_id)
        return list(self.get_deltas_for_revisions([r]))[0]

    @needs_write_lock
    def store_revision_signature(self, gpg_strategy, plaintext, revision_id):
        signature = gpg_strategy.sign(plaintext)
        self.add_signature_text(revision_id, signature)

    def add_signature_text(self, revision_id, signature):
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_text_key_references_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _find_text_key_references_from_xml_inventory_lines(self,
        line_iterator):
        """Core routine for extracting references to texts from inventories.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_ids):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_ids: The revision ids to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision id from each parsed line will be looked up in the
            revision_ids filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        result = {}
        setdefault = result.setdefault
        for key in \
            self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys():
            # once data is all ensured-consistent; then this is
            # if revision_id == version_id
            if key[-1:] in revision_ids:
                setdefault(key[0], set()).add(key[-1])
        return result

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revisions: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()
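
    # Illustrative result shape (ids are hypothetical): one entry per
    # altered file, naming exactly the revisions that altered it, e.g.
    #   {'file-id-1': set(['rev-1']),
    #    'file-id-2': set(['rev-1', 'rev-2'])}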

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files. No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator. identifier is an opaque
        value supplied by the caller as part of desired_files. It should
        uniquely identify the file version in the caller's context. (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file. The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        transaction = self.get_transaction()
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('fulltext')
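
    # Illustrative usage (file and revision ids are hypothetical):
    #
    #   desired_files = [('a-file-id', 'rev-1', 'my-identifier')]
    #   for identifier, bytes_iterator in repo.iter_files_bytes(desired_files):
    #       text = ''.join(bytes_iterator)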

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents.", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents.", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            parent_entry = inv._byid.get(text_key[0], None)
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents

        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions). knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'. file-id is None unless knit-kind is 'file'.
        """
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator. Ideally the caching would be done in fetch.py I think. Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed.
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if _files_pb is not None:
                _files_pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)
        # We're done with the files_pb. Note that it is finished by the
        # caller, just as it was created by the caller.
        del _files_pb

        yield ("inventory", None, revision_ids)

        revisions_with_signatures = set()
        for rev_id in revision_ids:
            try:
                self.get_signature_text(rev_id)
            except errors.NoSuchRevision:
                # not signed.
                pass
            else:
                revisions_with_signatures.add(rev_id)
        yield ("signatures", None, revisions_with_signatures)

        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids)

    def _iter_inventories(self, revision_ids):
        """single-document based inventory iteration."""
        for text, revision_id in self._iter_inventory_xmls(revision_ids):
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids):
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        texts = {}
        for record in stream:
            if record.storage_kind != 'absent':
                texts[record.key] = record.get_bytes_as('fulltext')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            yield texts[key], key[-1]

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')

    @needs_read_lock
    def is_shared(self):
        """AllInOne repositories cannot be shared."""
        return False

    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
            will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)
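
    # For example, u'r\xe9v-1' fails the encode('ascii') branch above and
    # raises NonAsciiRevisionId, while 'rev-1' (str or unicode) passes both
    # the encode and decode checks.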

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')
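
# Illustrative note (not part of the original module): after the loops above
# run at import time, each moved name is bound to a zero-argument forwarder,
# so callers get the relocated object back from a call, together with a
# deprecation warning:
#
#   cls = repository.RepositoryFormat7()   # warns, returns the class itself
#   fmt = cls()                            # then instantiate as before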


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
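
# Illustrative sketch (not part of the original module): a caller would
# typically feed install_revisions() with tuples drawn from another
# repository.  'src', 'target_repo' and 'revision_ids' are hypothetical.
#
#   pairs = [(src.get_revision(rid), src.revision_tree(rid), None)
#            for rid in revision_ids]
#   target_repo.lock_write()
#   try:
#       install_revisions(target_repo, pairs, num_revisions=len(pairs))
#   finally:
#       target_repo.unlock()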


def _install_revision(repository, rev, revision_tree, signature):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')


class KnitRepository(MetaDirRepository):
    """Knit format repository."""

    def _inventory_add_lines(self, inv_vf, revid, parents, lines):
        inv_vf.add_lines_with_ghosts(revid, parents, lines)

    @needs_read_lock
    def all_revision_ids(self):
        """See Repository.all_revision_ids()."""
        return self._revision_store.all_revision_ids(self.get_transaction())

    def fileid_involved_between_revs(self, from_revid, to_revid):
        """Find file_id(s) which are involved in the changes between revisions.

        This determines the set of revisions which are involved, and then
        finds all file ids affected by those revisions.
        """
        vf = self._get_revision_vf()
        from_set = set(vf.get_ancestry(from_revid))
        to_set = set(vf.get_ancestry(to_revid))
        changed = to_set.difference(from_set)
        return self._fileid_involved_by_set(changed)

    def fileid_involved(self, last_revid=None):
        """Find all file_ids modified in the ancestry of last_revid.

        :param last_revid: If None, last_revision() will be used.
        """
        if not last_revid:
            changed = set(self.all_revision_ids())
        else:
            changed = set(self.get_ancestry(last_revid))
        return self._fileid_involved_by_set(changed)

    @needs_read_lock
    def get_ancestry(self, revision_id):
        """Return a list of revision-ids integrated by a revision.

        This is topologically sorted.
        """
        if revision_id is None:
            return [None]
        vf = self._get_revision_vf()
        try:
            return [None] + vf.get_ancestry(revision_id)
        except errors.RevisionNotPresent:
            raise errors.NoSuchRevision(self, revision_id)

    @needs_read_lock
    def get_revision(self, revision_id):
        """Return the Revision object for a named revision"""
        return self.get_revision_reconcile(revision_id)

    @needs_read_lock
    def get_revision_graph(self, revision_id=None):
        """Return a dictionary containing the revision graph.

        :return: a dictionary of revision_id->revision_parents_list.
        """
        weave = self._get_revision_vf()
        entire_graph = weave.get_graph()
        if revision_id is None:
            return weave.get_graph()
        elif revision_id not in weave:
            raise errors.NoSuchRevision(self, revision_id)
        else:
            # add what can be reached from revision_id
            result = {}
            pending = set([revision_id])
            while len(pending) > 0:
                node = pending.pop()
                result[node] = weave.get_parents(node)
                for revision_id in result[node]:
                    if revision_id not in result:
                        pending.add(revision_id)
            return result

    @needs_read_lock
    def get_revision_graph_with_ghosts(self, revision_ids=None):
        """Return a graph of the revisions with ghosts marked as applicable.

        :param revision_ids: an iterable of revisions to graph or None for all.
        :return: a Graph object with the graph reachable from revision_ids.
        """
        result = Graph()
        vf = self._get_revision_vf()
        versions = set(vf.versions())
        if not revision_ids:
            pending = set(self.all_revision_ids())
            required = set([])
        else:
            pending = set(revision_ids)
            required = set(revision_ids)
        done = set([])
        while len(pending):
            revision_id = pending.pop()
            if not revision_id in versions:
                if revision_id in required:
                    raise errors.NoSuchRevision(self, revision_id)
                # a ghost
                result.add_ghost(revision_id)
                # mark it as done so we don't try for it again.
                done.add(revision_id)
                continue
            parent_ids = vf.get_parents_with_ghosts(revision_id)
            for parent_id in parent_ids:
                # is this queued or done ?
                if (parent_id not in pending and
                    parent_id not in done):
                    # no, queue it.
                    pending.add(parent_id)
            result.add_node(revision_id, parent_ids)
            done.add(revision_id)
        return result

    def _get_revision_vf(self):
        """:return: a versioned file containing the revisions."""
        vf = self._revision_store.get_revision_file(self.get_transaction())
        return vf

    def revision_parents(self, revid):
        return self._get_revision_vf().get_parents(revid)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import KnitReconciler
        reconciler = KnitReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


class RepositoryFormatRegistry(registry.Registry):
    """Registry of RepositoryFormats."""

    def get(self, format_string):
        r = registry.Registry.get(self, format_string)
        if callable(r):
            r = r()
        return r


format_registry = RepositoryFormatRegistry()
"""Registry of formats, indexed by their identifying format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""
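
# Illustrative sketch (not part of the original module): because get() above
# resolves callables, a format may be registered as an instance or as a
# factory and both look the same to callers.  'MyFormat' and the format
# strings are hypothetical.
#
#   format_registry.register('My format\n', MyFormat())       # an instance
#   format_registry.register_lazy('My lazy format\n',
#       'my.module', 'MyFormat')                              # a factory
#   fmt = format_registry.get('My lazy format\n')             # resolved here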


#####################################################################
# Repository Formats

class RepositoryFormat(object):
    """A repository format.
        raise NotImplementedError(self.open)

    @classmethod
    def register_format(klass, format):
        klass._formats[format.get_format_string()] = format

    @classmethod
    def set_default_format(klass, format):
        klass._default_format = format

    @classmethod
    def unregister_format(klass, format):
        assert klass._formats[format.get_format_string()] is format
        del klass._formats[format.get_format_string()]


class PreSplitOutRepositoryFormat(RepositoryFormat):
    """Base class for the pre split out repository formats."""

    def initialize(self, a_bzrdir, shared=False, _internal=False):
        """Create a weave repository.

        TODO: when creating split out bzr branch formats, move this to a common
        base for Format5, Format6. or something like that.
        """
        from bzrlib.weavefile import write_weave_v5
        from bzrlib.weave import Weave

        if shared:
            raise errors.IncompatibleFormat(self, a_bzrdir._format)

        if not _internal:
            # always initialized when the bzrdir is.
            return self.open(a_bzrdir, _found=True)

        # Create an empty weave
        sio = StringIO()
        bzrlib.weavefile.write_weave_v5(Weave(), sio)
        empty_weave = sio.getvalue()

        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['revision-store', 'weaves']
        files = [('inventory.weave', StringIO(empty_weave)),
                 ]

        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        control_files = LockableFiles(a_bzrdir.transport, 'branch-lock',
                                      TransportLock)
        control_files.create_lock()
        control_files.lock_write()
        control_files._transport.mkdir_multi(dirs,
                mode=control_files._dir_mode)
        try:
            for file, content in files:
                control_files.put(file, content)
        finally:
            control_files.unlock()
        return self.open(a_bzrdir, _found=True)

    def _get_control_store(self, repo_transport, control_files):
        """Return the control store for this repository."""
        return self._get_versioned_file_store('',
                                              repo_transport,
                                              control_files,
                                              prefixed=False)

    def _get_text_store(self, transport, control_files):
        """Get a store for file texts for this format."""
        raise NotImplementedError(self._get_text_store)

    def open(self, a_bzrdir, _found=False):
        """See RepositoryFormat.open()."""
        if not _found:
            # we are being called directly and must probe.
            raise NotImplementedError

        repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = a_bzrdir._control_files
        text_store = self._get_text_store(repo_transport, control_files)
        control_store = self._get_control_store(repo_transport, control_files)
        _revision_store = self._get_revision_store(repo_transport, control_files)
        return AllInOneRepository(_format=self,
                                  a_bzrdir=a_bzrdir,
                                  _revision_store=_revision_store,
                                  control_store=control_store,
                                  text_store=text_store)


class RepositoryFormat4(PreSplitOutRepositoryFormat):
    """Bzr repository format 4.

    This repository format has:
     - TextStores for texts, inventories, revisions.

    This format is deprecated: it indexes texts using a text id which is
    removed in format 5; initialization and write support for this format
    has been removed.
    """

    def __init__(self):
        super(RepositoryFormat4, self).__init__()
        self._matchingbzrdir = bzrlib.bzrdir.BzrDirFormat4()

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Repository format 4"

    def initialize(self, url, shared=False, _internal=False):
        """Format 4 branches cannot be created."""
        raise errors.UninitializableFormat(self)

    def is_supported(self):
        """Format 4 is not supported.

        It is not supported because the model changed from 4 to 5 and the
        conversion logic is expensive - so doing it on the fly was not
        feasible.
        """
        return False

    def _get_control_store(self, repo_transport, control_files):
        """Format 4 repositories have no formal control store at this point.

        This will cause any control-file-needing apis to fail - this is desired.
        """
        return None

    def _get_revision_store(self, repo_transport, control_files):
        """See RepositoryFormat._get_revision_store()."""
        from bzrlib.xml4 import serializer_v4
        return self._get_text_rev_store(repo_transport,
                                        control_files,
                                        'revision-store',
                                        serializer=serializer_v4)

    def _get_text_store(self, transport, control_files):
        """See RepositoryFormat._get_text_store()."""


class RepositoryFormat5(PreSplitOutRepositoryFormat):
    """Bzr control format 5.

    This repository format has:
     - weaves for file texts and inventory
     - TextStores for revisions and signatures.
    """

    def __init__(self):
        super(RepositoryFormat5, self).__init__()
        self._matchingbzrdir = bzrlib.bzrdir.BzrDirFormat5()

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Weave repository format 5"

    def _get_revision_store(self, repo_transport, control_files):
        """See RepositoryFormat._get_revision_store()."""
        return self._get_text_rev_store(repo_transport,
                                        control_files,
                                        'revision-store',
                                        compressed=False)

    def _get_text_store(self, transport, control_files):
        """See RepositoryFormat._get_text_store()."""
        return self._get_versioned_file_store('weaves', transport, control_files, prefixed=False)


class RepositoryFormat6(PreSplitOutRepositoryFormat):
    """Bzr control format 6.

    This repository format has:
     - weaves for file texts and inventory
     - hash subdirectory based stores.
     - TextStores for revisions and signatures.
    """

    def __init__(self):
        super(RepositoryFormat6, self).__init__()
        self._matchingbzrdir = bzrlib.bzrdir.BzrDirFormat6()

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Weave repository format 6"

    def _get_revision_store(self, repo_transport, control_files):
        """See RepositoryFormat._get_revision_store()."""
        return self._get_text_rev_store(repo_transport,
                                        control_files,
                                        'revision-store',
                                        compressed=False,
                                        prefixed=True)

    def _get_text_store(self, transport, control_files):
        """See RepositoryFormat._get_text_store()."""
        return self._get_versioned_file_store('weaves', transport, control_files)


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False
    _matchingbzrdir = bzrdir.BzrDirMetaFormat1()

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()


class RepositoryFormat7(MetaDirRepositoryFormat):
    """Bzr repository 7.

    This repository format has:
     - weaves for file texts and inventory
     - hash subdirectory based stores.
     - TextStores for revisions and signatures.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag
    """

    def _get_control_store(self, repo_transport, control_files):
        """Return the control store for this repository."""
        return self._get_versioned_file_store('',
                                              repo_transport,
                                              control_files,
                                              prefixed=False)

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar-NG Repository format 7"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Weave repository format 7"

    def _get_revision_store(self, repo_transport, control_files):
        """See RepositoryFormat._get_revision_store()."""
        return self._get_text_rev_store(repo_transport,
                                        control_files,
                                        'revision-store',
                                        compressed=False,
                                        prefixed=True)

    def _get_text_store(self, transport, control_files):
        """See RepositoryFormat._get_text_store()."""
        return self._get_versioned_file_store('weaves',
                                              transport,
                                              control_files)

    def initialize(self, a_bzrdir, shared=False):
        """Create a weave repository.

        :param shared: If true the repository will be initialized as a shared
                       repository.
        """
        from bzrlib.weavefile import write_weave_v5
        from bzrlib.weave import Weave

        # Create an empty weave
        sio = StringIO()
        bzrlib.weavefile.write_weave_v5(Weave(), sio)
        empty_weave = sio.getvalue()

        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['revision-store', 'weaves']
        files = [('inventory.weave', StringIO(empty_weave)),
                 ]
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        return self.open(a_bzrdir=a_bzrdir, _found=True)

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
                                    repository at a slightly different url
                                    than normal. I.e. during 'upgrade'.
        """
        if not _found:
            format = RepositoryFormat.find_format(a_bzrdir)
            assert format.__class__ == self.__class__
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = LockableFiles(repo_transport, 'lock', LockDir)
        text_store = self._get_text_store(repo_transport, control_files)
        control_store = self._get_control_store(repo_transport, control_files)
        _revision_store = self._get_revision_store(repo_transport, control_files)
        return MetaDirRepository(_format=self,
                                 a_bzrdir=a_bzrdir,
                                 control_files=control_files,
                                 _revision_store=_revision_store,
                                 control_store=control_store,
                                 text_store=text_store)


class RepositoryFormatKnit1(MetaDirRepositoryFormat):
    """Bzr repository knit format 1.

    This repository format has:
     - knits for file texts and inventory
     - hash subdirectory based stores.
     - knits for revisions and signatures
     - TextStores for revisions and signatures.
     - a format marker of its own
     - an optional 'shared-storage' flag
     - an optional 'no-working-trees' flag

    This format was introduced in bzr 0.8.
    """

    def _get_control_store(self, repo_transport, control_files):
        """Return the control store for this repository."""
        return VersionedFileStore(
            repo_transport,
            prefixed=False,
            file_mode=control_files._file_mode,
            versionedfile_class=KnitVersionedFile,
            versionedfile_kwargs={'factory':KnitPlainFactory()},
            )

    def get_format_string(self):
        """See RepositoryFormat.get_format_string()."""
        return "Bazaar-NG Knit Repository Format 1"

    def get_format_description(self):
        """See RepositoryFormat.get_format_description()."""
        return "Knit repository format 1"

    def _get_revision_store(self, repo_transport, control_files):
        """See RepositoryFormat._get_revision_store()."""
        from bzrlib.store.revision.knit import KnitRevisionStore
        versioned_file_store = VersionedFileStore(
            repo_transport,
            file_mode=control_files._file_mode,
            prefixed=False,
            versionedfile_class=KnitVersionedFile,
            versionedfile_kwargs={'delta':False, 'factory':KnitPlainFactory()},
            )
        return KnitRevisionStore(versioned_file_store)

    def _get_text_store(self, transport, control_files):
        """See RepositoryFormat._get_text_store()."""
        return self._get_versioned_file_store('knits',
                                              transport,
                                              control_files,
                                              versionedfile_class=KnitVersionedFile)

    def initialize(self, a_bzrdir, shared=False):
        """Create a knit format 1 repository.

        :param a_bzrdir: bzrdir to contain the new repository; must already
            be versioned.
        :param shared: If true the repository will be initialized as a shared
            repository.
        """
        mutter('creating repository in %s.', a_bzrdir.transport.base)
        dirs = ['revision-store', 'knits']
        files = []
        utf8_files = [('format', self.get_format_string())]

        self._upload_blank_content(a_bzrdir, dirs, files, utf8_files, shared)
        repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = LockableFiles(repo_transport, 'lock', LockDir)
        control_store = self._get_control_store(repo_transport, control_files)
        transaction = bzrlib.transactions.WriteTransaction()
        # trigger a write of the inventory store.
        control_store.get_weave_or_empty('inventory', transaction)
        _revision_store = self._get_revision_store(repo_transport, control_files)
        _revision_store.has_revision_id('A', transaction)
        _revision_store.get_signature_file(transaction)
        return self.open(a_bzrdir=a_bzrdir, _found=True)

    def open(self, a_bzrdir, _found=False, _override_transport=None):
        """See RepositoryFormat.open().

        :param _override_transport: INTERNAL USE ONLY. Allows opening the
                                    repository at a slightly different url
                                    than normal. I.e. during 'upgrade'.
        """
        if not _found:
            format = RepositoryFormat.find_format(a_bzrdir)
            assert format.__class__ == self.__class__
        if _override_transport is not None:
            repo_transport = _override_transport
        else:
            repo_transport = a_bzrdir.get_repository_transport(None)
        control_files = LockableFiles(repo_transport, 'lock', LockDir)
        text_store = self._get_text_store(repo_transport, control_files)
        control_store = self._get_control_store(repo_transport, control_files)
        _revision_store = self._get_revision_store(repo_transport, control_files)
        return KnitRepository(_format=self,
                              a_bzrdir=a_bzrdir,
                              control_files=control_files,
                              _revision_store=_revision_store,
                              control_store=control_store,
                              text_store=text_store)


# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
RepositoryFormat.register_format(RepositoryFormat7())
_default_format = RepositoryFormatKnit1()
RepositoryFormat.register_format(_default_format)
RepositoryFormat.set_default_format(_default_format)
_legacy_formats = [RepositoryFormat4(),
                   RepositoryFormat5(),
                   RepositoryFormat6()]

# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.
format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )

# Development formats.
# development 0 - stub to introduce development versioning scheme.
format_registry.register_lazy(
    "Bazaar development format 0 (needs bzr.dev from before 1.3)\n",
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment0',
    )
format_registry.register_lazy(
    ("Bazaar development format 0 with subtree support "
        "(needs bzr.dev from before 1.3)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment0Subtree',
    )
format_registry.register_lazy(
    "Bazaar development format 1 (needs bzr.dev from before 1.6)\n",
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment1',
    )
format_registry.register_lazy(
    ("Bazaar development format 1 with subtree support "
        "(needs bzr.dev from before 1.6)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment1Subtree',
    )
# 1.3->1.4 go below here


class InterRepository(InterObject):
    """This class represents operations taking place between two repositories.

    Its instances have methods like copy_content and fetch, and contain
    references to the source and target repositories these operations can be
    carried out on.

    Often we will provide convenience methods on 'repository' which carry out
    operations with another repository - they will always forward to
    InterRepository.get(other).method_name(parameters).
    """

    _optimisers = []
    """The available optimised InterRepository types."""

    def _double_lock(self, lock_source, lock_target):
        """Take out two locks, rolling back the first if the second throws."""
        lock_source()
        try:
            lock_target()
        except Exception:
            # we want to ensure that we don't leave source locked by mistake.
            # and any error on target should not confuse source.
            self.source.unlock()
            raise

    def copy_content(self, revision_id=None):
        raise NotImplementedError(self.copy_content)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :param pb: optional progress bar to use for progress reports. If not
                   provided a default one will be created.

        :returns: (copied_revision_count, failures).
        """
        # Normally we should find a specific InterRepository subclass to do
        # the fetch; if nothing else then at least InterSameDataRepository.
        # If none of them is suitable it looks like fetching is not possible;
        # we try to give a good message why.  _assert_same_model will probably
        # give a helpful message; otherwise a generic one.
        self._assert_same_model(self.source, self.target)
        raise errors.IncompatibleRepositories(self.source, self.target,
            "no suitable InterRepository found")

    def lock_read(self):
        """Take out a logical read lock.

        This will lock the source branch and the target branch. The source gets
        a read lock and the target a read lock.
        """
        self._double_lock(self.source.lock_read, self.target.lock_read)

    def lock_write(self):
        """Take out a logical write lock.

        This will lock the source branch and the target branch. The source gets
        a read lock and the target a write lock.
        """
        self._double_lock(self.source.lock_read, self.target.lock_write)

    def _walk_to_common_revisions(self, revision_ids):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A set of revision ids.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        if set(target_graph.get_parent_map(revision_ids)) == revision_ids:
            return graph.SearchResult(revision_ids, set(), 0, set())
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(revision_ids)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        while True:
            try:
                next_revs, ghosts = searcher.next_with_ghosts()
            except StopIteration:
                break
            if revision_ids.intersection(ghosts):
                absent_ids = set(revision_ids.intersection(ghosts))
                # If all absent_ids are present in target, no error is needed.
                absent_ids.difference_update(
                    set(target_graph.get_parent_map(absent_ids)))
                if absent_ids:
                    raise errors.NoSuchRevision(self.source, absent_ids.pop())
            # we don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            next_revs = set(next_revs)
            # we always have NULL_REVISION present.
            have_revs = set(target_graph.get_parent_map(next_revs)).union(null_set)
            missing_revs.update(next_revs - have_revs)
            searcher.stop_searching_any(have_revs)
        return searcher.get_result()

    @deprecated_method(one_two)
    @needs_read_lock
    def missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        These are returned in topological order.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        """
        return list(self.search_missing_revision_ids(
            revision_id, find_ghosts).get_keys())

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :return: A bzrlib.graph.SearchResult.
        """
        # stop searching at found target revisions.
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        # generic, possibly worst case, slow code path.
        target_ids = set(self.target.all_revision_ids())
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)

    def unlock(self):
        """Release the locks on source and target."""
        try:
            self.target.unlock()
        finally:
            self.source.unlock()
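
    # Illustrative sketch (not part of the original module): a pull-style
    # operation could use the search API above to decide what to transfer.
    # 'source', 'target' and 'tip_id' are hypothetical.
    #
    #   inter = InterRepository.get(source, target)
    #   search = inter.search_missing_revision_ids(revision_id=tip_id,
    #       find_ghosts=False)
    #   needed = search.get_keys()
    #   inter.fetch(revision_id=tip_id)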

    @staticmethod
    def _same_model(source, target):
        """True if source and target have the same data representation.

        Note: this is always called on the base class; overriding it in a
        subclass will have no effect.
        """
        try:
            InterRepository._assert_same_model(source, target)
            return True
        except errors.IncompatibleRepositories, e:
            return False

    @staticmethod
    def _assert_same_model(source, target):
        """Raise an exception if two repositories do not use the same model.
        """
        if source.supports_rich_root() != target.supports_rich_root():
            raise errors.IncompatibleRepositories(source, target,
                "different rich-root support")
        if source._serializer != target._serializer:
            raise errors.IncompatibleRepositories(source, target,
                "different serializers")


class InterSameDataRepository(InterRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return InterRepository._same_model(source, target)

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This copies both the repository's revision data, and configuration
        information such as the make_working_trees setting.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import RepoFetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = RepoFetcher(to_repository=self.target,
                        from_repository=self.source,
                        last_revision=revision_id,
                        pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterWeaveRepo(InterSameDataRepository):
    """Optimised code paths between Weave based repositories.

    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
    implemented lazy inter-object optimisation.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import weaverepo
        return weaverepo.RepositoryFormat7()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Weave formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.weaverepo import (
                RepositoryFormat5,
                RepositoryFormat6,
                RepositoryFormat7,
                )
        try:
            return (isinstance(source._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)) and
                    isinstance(target._format, (RepositoryFormat5,
                                                RepositoryFormat6,
                                                RepositoryFormat7)))
        except AttributeError:
            return False

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        # - RBC 20060209
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source._all_possible_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target._all_possible_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)
class InterKnitRepo(InterSameDataRepository):
1683
2664
"""Optimised code paths between Knit based repositories."""
1685
_matching_repo_format = RepositoryFormatKnit1()
1686
"""Repository format for testing with."""
2667
def _get_repo_format_to_test(self):
2668
from bzrlib.repofmt import knitrepo
2669
return knitrepo.RepositoryFormatKnit1()
1689
2672
def is_compatible(source, target):
1690
2673
"""Be compatible with known Knit formats.
1692
We dont test for the stores being of specific types becase that
2675
We don't test for the stores being of specific types because that
1693
2676
could lead to confusing results, and there is no need to be
1694
2677
overly general.
2679
from bzrlib.repofmt.knitrepo import RepositoryFormatKnit
1697
return (isinstance(source._format, (RepositoryFormatKnit1)) and
1698
isinstance(target._format, (RepositoryFormatKnit1)))
2681
are_knits = (isinstance(source._format, RepositoryFormatKnit) and
2682
isinstance(target._format, RepositoryFormatKnit))
1699
2683
except AttributeError:
2685
return are_knits and InterRepository._same_model(source, target)
1702
2687
@needs_write_lock
1703
def fetch(self, revision_id=None, pb=None):
2688
def fetch(self, revision_id=None, pb=None, find_ghosts=False):
1704
2689
"""See InterRepository.fetch()."""
1705
from bzrlib.fetch import KnitRepoFetcher
2690
from bzrlib.fetch import RepoFetcher
1706
2691
mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
1707
2692
self.source, self.source._format, self.target, self.target._format)
1708
f = KnitRepoFetcher(to_repository=self.target,
2693
f = RepoFetcher(to_repository=self.target,
1709
2694
from_repository=self.source,
1710
2695
last_revision=revision_id,
2696
pb=pb, find_ghosts=find_ghosts)
1712
2697
return f.count_copied, f.failed_revisions

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids()."""
        if revision_id is not None:
            source_ids = self.source.get_ancestry(revision_id)
            if source_ids[0] is not None:
                raise AssertionError()
            source_ids.pop(0)
        else:
            source_ids = self.source.all_revision_ids()
        source_ids_set = set(source_ids)
        # source_ids is the worst possible case we may need to pull.
        # now we want to filter source_ids against what we actually
        # have in target, but don't try to check for existence where we know
        # we do not have a revision as that would be pointless.
        target_ids = set(self.target.all_revision_ids())
        possibly_present_revisions = target_ids.intersection(source_ids_set)
        actually_present_revisions = set(
            self.target._eliminate_revisions_not_present(possibly_present_revisions))
        required_revisions = source_ids_set.difference(actually_present_revisions)
        if revision_id is not None:
            # we used get_ancestry to determine source_ids then we are assured all
            # revisions referenced are present as they are installed in topological order.
            # and the tip revision was validated by get_ancestry.
            result_set = required_revisions
        else:
            # if we just grabbed the possibly available ids, then
            # we only have an estimate of what's available and need to validate
            # that against the revision records.
            result_set = set(
                self.source._eliminate_revisions_not_present(required_revisions))
        return self.source.revision_ids_to_search_result(result_set)


class InterPackRepo(InterSameDataRepository):
    """Optimised code paths between Pack based repositories."""

    @classmethod
    def _get_repo_format_to_test(self):
        from bzrlib.repofmt import pack_repo
        return pack_repo.RepositoryFormatKnitPack1()

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with known Pack formats.

        We don't test for the stores being of specific types because that
        could lead to confusing results, and there is no need to be
        overly general.
        """
        from bzrlib.repofmt.pack_repo import RepositoryFormatPack
        try:
            are_packs = (isinstance(source._format, RepositoryFormatPack) and
                isinstance(target._format, RepositoryFormatPack))
        except AttributeError:
            return False
        return are_packs and InterRepository._same_model(source, target)

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        if (len(self.source._fallback_repositories) > 0 or
            len(self.target._fallback_repositories) > 0):
            # The pack layer is not aware of fallback repositories, so when
            # fetching from a stacked repository or into a stacked repository
            # we use the generic fetch logic which uses the VersionedFiles
            # attributes on repository.
            from bzrlib.fetch import RepoFetcher
            fetcher = RepoFetcher(self.target, self.source, revision_id,
                                  pb, find_ghosts)
            return fetcher.count_copied, fetcher.failed_revisions
        from bzrlib.repofmt.pack_repo import Packer
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target, self.target._format)
        self.count_copied = 0
        if revision_id is None:
            # TODO:
            # everything to do - use pack logic
            # to fetch from all packs to one without
            # inventory parsing etc, IFF nothing to be copied is in the target.
            # till then:
            source_revision_ids = frozenset(self.source.all_revision_ids())
            revision_ids = source_revision_ids - \
                frozenset(self.target.get_parent_map(source_revision_ids))
            revision_keys = [(revid,) for revid in revision_ids]
            index = self.target._pack_collection.revision_index.combined_index
            present_revision_ids = set(item[1][0] for item in
                index.iter_entries(revision_keys))
            revision_ids = set(revision_ids) - present_revision_ids
            # implementing the TODO will involve:
            # - detecting when all of a pack is selected
            # - avoiding as much as possible pre-selection, so the
            # more-core routines such as create_pack_from_packs can filter in
            # a just-in-time fashion. (though having a HEADS list on a
            # repository might make this a lot easier, because we could
            # sensibly detect 'new revisions' without doing a full index scan.
        elif _mod_revision.is_null(revision_id):
            # nothing to do:
            return (0, [])
        else:
            try:
                revision_ids = self.search_missing_revision_ids(revision_id,
                    find_ghosts=find_ghosts).get_keys()
            except errors.NoSuchRevision:
                raise errors.InstallFailed([revision_id])
            if len(revision_ids) == 0:
                return (0, [])
        packs = self.source._pack_collection.all_packs()
        pack = Packer(self.target._pack_collection, packs, '.fetch',
            revision_ids).pack()
        if pack is not None:
            self.target._pack_collection._save_pack_names()
            # Trigger an autopack. This may duplicate effort as we've just done
            # a pack creation, but for now it is simpler to think about as
            # 'upload data, then repack if needed'.
            self.target._pack_collection.autopack()
            return (pack.get_revision_count(), [])
        else:
            return (0, [])

    @needs_read_lock
    def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):
        """See InterRepository.missing_revision_ids().

        :param find_ghosts: Find ghosts throughout the ancestry of
            revision_id.
        """
        if not find_ghosts and revision_id is not None:
            return self._walk_to_common_revisions([revision_id])
        elif revision_id is not None:
            # Find ghosts: search for revisions pointing from one repository to
            # the other, and vice versa, anywhere in the history of revision_id.
            graph = self.target.get_graph(other_repository=self.source)
            searcher = graph._make_breadth_first_searcher([revision_id])
            found_ids = set()
            while True:
                try:
                    next_revs, ghosts = searcher.next_with_ghosts()
                except StopIteration:
                    break
                if revision_id in ghosts:
                    raise errors.NoSuchRevision(self.source, revision_id)
                found_ids.update(next_revs)
                found_ids.update(ghosts)
            found_ids = frozenset(found_ids)
            # Double query here: should be able to avoid this by changing the
            # graph api further.
            result_set = found_ids - frozenset(
                self.target.get_parent_map(found_ids))
        else:
            source_ids = self.source.all_revision_ids()
            # source_ids is the worst possible case we may need to pull.
            # now we want to filter source_ids against what we actually
            # have in target, but don't try to check for existence where we know
            # we do not have a revision as that would be pointless.
            target_ids = set(self.target.all_revision_ids())
            result_set = set(source_ids).difference(target_ids)
        return self.source.revision_ids_to_search_result(result_set)
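
    # Illustrative contrast (not part of the original module; 'source',
    # 'target' and 'tip' are hypothetical):
    #
    #   inter = InterPackRepo(source, target)
    #   deep = inter.search_missing_revision_ids(tip, find_ghosts=True)
    #   shallow = inter.search_missing_revision_ids(tip, find_ghosts=False)
    #
    # With find_ghosts=True the breadth-first search above walks the whole
    # ancestry, so a ghost filled in anywhere in history is noticed; with
    # find_ghosts=False the cheaper _walk_to_common_revisions() stops at the
    # first revisions the target already has.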


class InterModel1and2(InterRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source.supports_rich_root() and target.supports_rich_root():
            return True
        else:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Model1toKnit2Fetcher
        f = Model1toKnit2Fetcher(to_repository=self.target,
                                 from_repository=self.source,
                                 last_revision=revision_id,
                                 pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions

    @needs_write_lock
    def copy_content(self, revision_id=None):
        """Make a complete copy of the content in self into destination.

        This is a destructive operation! Do not use it on existing
        repositories.

        :param revision_id: Only copy the content needed to construct
                            revision_id and its parents.
        """
        try:
            self.target.set_make_working_trees(self.source.make_working_trees())
        except NotImplementedError:
            pass
        # but don't bother fetching if we have the needed data now.
        if (revision_id not in (None, _mod_revision.NULL_REVISION) and
            self.target.has_revision(revision_id)):
            return
        self.target.fetch(self.source, revision_id=revision_id)


class InterKnit1and2(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit1 source and Knit3 target"""
        try:
            from bzrlib.repofmt.knitrepo import (RepositoryFormatKnit1,
                RepositoryFormatKnit3)
            from bzrlib.repofmt.pack_repo import (
                RepositoryFormatKnitPack1,
                RepositoryFormatKnitPack3,
                RepositoryFormatPackDevelopment0,
                RepositoryFormatPackDevelopment0Subtree,
                )
            nosubtrees = (
                RepositoryFormatKnit1,
                RepositoryFormatKnitPack1,
                RepositoryFormatPackDevelopment0,
                )
            subtrees = (
                RepositoryFormatKnit3,
                RepositoryFormatKnitPack3,
                RepositoryFormatPackDevelopment0Subtree,
                )
            return (isinstance(source._format, nosubtrees) and
                isinstance(target._format, subtrees))
        except AttributeError:
            return False

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        from bzrlib.fetch import Knit1to2Fetcher
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.source, self.source._format, self.target,
               self.target._format)
        f = Knit1to2Fetcher(to_repository=self.target,
                            from_repository=self.source,
                            last_revision=revision_id,
                            pb=pb, find_ghosts=find_ghosts)
        return f.count_copied, f.failed_revisions


class InterDifferingSerializer(InterKnitRepo):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        """Be compatible with Knit2 source and Knit3 target"""
        if source.supports_rich_root() != target.supports_rich_root():
            return False
        # Ideally, we'd support fetching if the source had no tree references
        # even if it supported them...
        if (getattr(source._format, 'supports_tree_reference', False) and
            not getattr(target._format, 'supports_tree_reference', False)):
            return False
        return True

    @needs_write_lock
    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        """See InterRepository.fetch()."""
        revision_ids = self.target.search_missing_revision_ids(self.source,
            revision_id, find_ghosts=find_ghosts).get_keys()
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        def revisions_iterator():
            for current_revision_id in revision_ids:
                revision = self.source.get_revision(current_revision_id)
                tree = self.source.revision_tree(current_revision_id)
                try:
                    signature = self.source.get_signature_text(
                        current_revision_id)
                except errors.NoSuchRevision:
                    signature = None
                yield revision, tree, signature
        if pb is None:
            my_pb = ui.ui_factory.nested_progress_bar()
            pb = my_pb
        else:
            my_pb = None
        try:
            install_revisions(self.target, revisions_iterator(),
                              len(revision_ids), pb)
        finally:
            if my_pb is not None:
                my_pb.finished()
        return len(revision_ids), 0


class InterOtherToRemote(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if isinstance(target, remote.RemoteRepository):
            return True
        return False

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.target._ensure_real()
            real_target = self.target._real_repository
            self._real_inter = InterRepository.get(self.source, real_target)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    @classmethod
    def _get_repo_format_to_test(self):
        return None


class InterRemoteToOther(InterRepository):

    def __init__(self, source, target):
        InterRepository.__init__(self, source, target)
        self._real_inter = None

    @staticmethod
    def is_compatible(source, target):
        if not isinstance(source, remote.RemoteRepository):
            return False
        # Is source's model compatible with target's model?
        source._ensure_real()
        real_source = source._real_repository
        if isinstance(real_source, remote.RemoteRepository):
            raise NotImplementedError(
                "We don't support remote repos backed by remote repos yet.")
        return InterRepository._same_model(real_source, target)

    def _ensure_real_inter(self):
        if self._real_inter is None:
            self.source._ensure_real()
            real_source = self.source._real_repository
            self._real_inter = InterRepository.get(real_source, self.target)

    def fetch(self, revision_id=None, pb=None, find_ghosts=False):
        self._ensure_real_inter()
        return self._real_inter.fetch(revision_id=revision_id, pb=pb,
            find_ghosts=find_ghosts)

    def copy_content(self, revision_id=None):
        self._ensure_real_inter()
        self._real_inter.copy_content(revision_id=revision_id)

    @classmethod
    def _get_repo_format_to_test(self):
        return None
InterRepository.register_optimiser(InterDifferingSerializer)
3072
InterRepository.register_optimiser(InterSameDataRepository)
1744
3073
InterRepository.register_optimiser(InterWeaveRepo)
1745
3074
InterRepository.register_optimiser(InterKnitRepo)


class RepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple repository formats at once.

    This is done by copying the test once for each transport and injecting
    the transport_server, transport_readonly_server, and bzrdir_format and
    repository_format classes into each copy. Each copy is also given a new id()
    to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        result = TestSuite()
        for repository_format, bzrdir_format in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.bzrdir_format = bzrdir_format
            new_test.repository_format = repository_format
            def make_new_test_id():
                new_id = "%s(%s)" % (new_test.id(), repository_format.__class__.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        return result
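

# Hedged usage sketch, added for illustration only; the server classes and
# the format pair named here are hypothetical placeholders, not bzrlib API:
#
#   adapter = RepositoryTestProviderAdapter(
#       SomeTransportServer, SomeReadonlyTransportServer,
#       [(a_repository_format, a_bzrdir_format)])
#   suite = adapter.adapt(base_test)
#
# Each test in the resulting suite carries its own transport_server and
# repository_format attributes, and an id() suffixed with the format class
# name so failures identify the format under test.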


class InterRepositoryTestProviderAdapter(object):
    """A tool to generate a suite testing multiple inter repository formats.

    This is done by copying the test once for each interrepo provider and injecting
    the transport_server, transport_readonly_server, repository_format and
    repository_to_format classes into each copy.
    Each copy is also given a new id() to make it easy to identify.
    """

    def __init__(self, transport_server, transport_readonly_server, formats):
        self._transport_server = transport_server
        self._transport_readonly_server = transport_readonly_server
        self._formats = formats

    def adapt(self, test):
        result = TestSuite()
        for interrepo_class, repository_format, repository_format_to in self._formats:
            new_test = deepcopy(test)
            new_test.transport_server = self._transport_server
            new_test.transport_readonly_server = self._transport_readonly_server
            new_test.interrepo_class = interrepo_class
            new_test.repository_format = repository_format
            new_test.repository_format_to = repository_format_to
            def make_new_test_id():
                new_id = "%s(%s)" % (new_test.id(), interrepo_class.__name__)
                return lambda: new_id
            new_test.id = make_new_test_id()
            result.addTest(new_test)
        return result
    @staticmethod
    def default_test_list():
        """Generate the default list of interrepo permutations to test."""
        result = []
        # test the default InterRepository between format 6 and the current
        # default format.
        # XXX: robertc 20060220 reinstate this when there are two supported
        # formats which do not have an optimal code path between them.
        #result.append((InterRepository,
        #               RepositoryFormat6(),
        #               RepositoryFormatKnit1()))
        for optimiser in InterRepository._optimisers:
            result.append((optimiser,
                           optimiser._matching_repo_format,
                           optimiser._matching_repo_format
                           ))
        # if there are specific combinations we want to use, we can add them
        # here.
        return result


InterRepository.register_optimiser(InterModel1and2)
InterRepository.register_optimiser(InterKnit1and2)
InterRepository.register_optimiser(InterPackRepo)
InterRepository.register_optimiser(InterOtherToRemote)
InterRepository.register_optimiser(InterRemoteToOther)
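
# Hedged sketch, added for illustration: InterRepository.get() consults the
# optimisers registered above, asking each one is_compatible(source, target),
# and falls back to the generic InterRepository when none match. The
# repository variables below are hypothetical:
#
#   inter = InterRepository.get(source_repo, target_repo)
#   inter.fetch(revision_id=None, find_ghosts=False)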


class CopyConverter(object):
    """A repository conversion tool which just performs a copy of the content.

    This is slow but quite reliable.
    """

    def step(self, message):
        """Update the pb by a step."""
        self.count += 1
        self.pb.update(message, self.count, self.total)


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """
    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            assert isinstance(committer, basestring), type(committer)
            self._committer = committer

        self.new_inventory = Inventory()
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._revprops.update(revprops)

        if timestamp is None:
            self._timestamp = time.time()
        else:
            self._timestamp = long(timestamp)

        if timezone is None:
            self._timezone = local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        rev = Revision(timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        return self._new_revision_id

    def finish_inventory(self):
        """Tell the builder that the inventory is finished."""
        self.new_inventory.revision_id = self._new_revision_id
        self.inv_sha1 = self.repository.add_inventory(
            self._new_revision_id,
            self.new_inventory,
            self.parents
            )

    def _gen_revision_id(self):
        """Return new revision-id."""
        s = '%s-%s-' % (self._config.user_email(),
                        compact_date(self._timestamp))
        s += hexlify(rand_bytes(8))
        return s
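
    # Illustrative shape of a generated id (values invented, added comment):
    # for user jane@example.com with a timestamp that compacts to
    # 20060408031853 and eight random bytes, _gen_revision_id() yields
    # something like:
    #   'jane@example.com-20060408031853-1d4a9c0f3b2e8d71'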

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise UnsupportedOperation
        if _new_revision_id is not None.

        :raises: UnsupportedOperation
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()

    def record_entry_contents(self, ie, parent_invs, path, tree):
        """Record the content of ie from tree into the commit if needed.

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        """
        self.new_inventory.add(ie)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. ie.snapshot will record the correct revision
        # which may be the sole parent if it is untouched.
        if ie.revision is not None:
            return
        previous_entries = ie.find_previous_heads(
            parent_invs,
            self.repository.weave_store,
            self.repository.get_transaction())
        # we are creating a new revision for ie in the history store
        # and inventory.
        ie.snapshot(self._new_revision_id, path, previous_entries, tree, self)

    def modified_directory(self, file_id, file_parents):
        """Record that a directory has been modified.

        :param file_id: The file_id of the directory to record.
        :param file_parents: The per-file parent revision ids.
        """
        self._add_text_to_weave(file_id, [], file_parents.keys())

    def modified_file_text(self, file_id, file_parents,
                           get_content_byte_lines, text_sha1=None,
                           text_size=None):
        """Record the text of file file_id.

        :param file_id: The file_id of the file to record the text of.
        :param file_parents: The per-file parent revision ids.
        :param get_content_byte_lines: A callable which will return the byte
            lines for the file.
        :param text_sha1: Optional SHA1 of the file contents.
        :param text_size: Optional size of the file contents.
        """
        mutter('storing text of file {%s} in revision {%s} into %r',
               file_id, self._new_revision_id, self.repository.weave_store)
        # special case to avoid diffing on renames or
        # reparenting
        if (len(file_parents) == 1
            and text_sha1 == file_parents.values()[0].text_sha1
            and text_size == file_parents.values()[0].text_size):
            previous_ie = file_parents.values()[0]
            versionedfile = self.repository.weave_store.get_weave(file_id,
                self.repository.get_transaction())
            versionedfile.clone_text(self._new_revision_id,
                previous_ie.revision, file_parents.keys())
            return text_sha1, text_size
        else:
            new_lines = get_content_byte_lines()
            # TODO: Rather than invoking sha_strings here, _add_text_to_weave
            # should return the SHA1 and size
            self._add_text_to_weave(file_id, new_lines, file_parents.keys())
            return bzrlib.osutils.sha_strings(new_lines), \
                sum(map(len, new_lines))

    def modified_link(self, file_id, file_parents, link_target):
        """Record the presence of a symbolic link.

        :param file_id: The file_id of the link to record.
        :param file_parents: The per-file parent revision ids.
        :param link_target: Target location of this link.
        """
        self._add_text_to_weave(file_id, [], file_parents.keys())

    def _add_text_to_weave(self, file_id, new_lines, parents):
        versionedfile = self.repository.weave_store.get_weave_or_empty(
            file_id, self.repository.get_transaction())
        versionedfile.add_lines(self._new_revision_id, parents, new_lines)
        versionedfile.clear_cache()


# Copied from xml.sax.saxutils
def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


def _unescape_xml(data):
    """Unescape &amp;, &lt;, and &gt; in a string of data.
    """
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    # must do ampersand last
    return data.replace("&amp;", "&")


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
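

# Illustrative sketch, added for clarity; it assumes _unescape_map, defined
# earlier in this module, maps the predefined entity names such as 'amp',
# 'lt' and 'gt'. It is not called anywhere, it just shows the expected
# behaviour of the regex-based unescaper above.
def _example_unescape_xml():
    # numeric character references decode too: '&#169;' becomes the
    # utf-8 encoding of the copyright sign, '\xc2\xa9'
    return _unescape_xml('&lt;a&gt; &amp; &#169;')  # => '<a> & \xc2\xa9'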


class _VersionedFileChecker(object):

    def __init__(self, repository):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index()

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventory.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_weaves = len(self.file_ids)
        files_in_revisions = {}
        revisions_of_files = {}
        n_versions = len(self.text_index)
        progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys
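
    # Hedged usage sketch (added comment only; `checker` and `report` are
    # hypothetical names): a non-empty first element of the result indicates
    # per-file graph corruption, the second lists dangling text versions.
    #
    #   wrong, dangling = checker.check_file_version_parents(texts)
    #   for key, (stored, correct) in wrong.iteritems():
    #       report(key, stored, correct)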


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
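

# Illustrative sketch, added for clarity; the revision ids are invented and
# it is not called anywhere. It demonstrates that parents which are not
# themselves keys of the graph (ghosts) are stripped, as is NULL_REVISION.
def _example_strip_null_ghosts():
    graph = {'B': ('A', 'ghost'), 'A': (_mod_revision.NULL_REVISION,)}
    # returns {'B': ('A',), 'A': ()}
    return _strip_NULL_ghosts(graph)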