# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import re
import time

from bzrlib import (
    bzrdir,
    check,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    lazy_regex,
    lockable_files,
    lockdir,
    lru_cache,
    osutils,
    revision as _mod_revision,
    symbol_versioning,
    tsort,
    ui,
    versionedfile,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib import registry
from bzrlib.symbol_versioning import (
    deprecated_method,
    )
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

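    # A minimal usage sketch, assuming the standard Repository.get_commit_builder()
    # entry point; `repo`, `branch`, `parents` and `config` below are
    # placeholders, not names defined in this module:
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   # ...describe the new tree via record_entry_contents() or
    #   # record_iter_changes()...
    #   builder.finish_inventory()
    #   rev_id = builder.commit('commit message')
    #
    # commit() finishes the write group itself via commit_write_group(); on
    # failure, abort() calls abort_write_group() instead.
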
    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

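    # Illustrative revision properties that pass _validate_revprops(): values
    # must be strings containing no '\r'. The property names shown here are
    # examples only, not required keys:
    #
    #   revprops = {u'branch-nick': u'trunk', u'reviewed-by': u'someone'}
    #   builder = repo.get_commit_builder(branch, parents, config,
    #                                     revprops=revprops)
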
    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
            timestamp=self._timestamp,
            timezone=self._timezone,
            committer=self._committer,
            message=message,
            inventory_sha1=self.inv_sha1,
            revision_id=self._new_revision_id,
            properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            self.inv_sha1 = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents
                )
        return self._new_revision_id

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository can not support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

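    # The accumulated basis delta is a list of 4-tuples
    # (old_path, new_path, file_id, new_entry), built by _get_delta() above and
    # record_delete() below. Illustrative shapes (paths and ids are made up):
    #
    #   (None, 'foo.txt', 'foo-id', entry)       # added in this commit
    #   ('foo.txt', 'bar.txt', 'foo-id', entry)  # renamed and/or modified
    #   ('foo.txt', None, 'foo-id', None)        # deleted against the basis
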
    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

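    # A minimal sketch of the optional delete-recording flow; `builder` and
    # `deleted_items` are placeholders. Callers must opt in with
    # will_record_deletes() before record_delete() or get_basis_delta() can be
    # used:
    #
    #   builder.will_record_deletes()
    #   for path, file_id in deleted_items:
    #       builder.record_delete(path, file_id)
    #   # ...record the remaining entries...
    #   delta = builder.get_basis_delta()
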
    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id

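    # A minimal sketch of how a caller typically drives record_entry_contents()
    # below; `builder`, `work_tree`, `parent_invs`, `path` and `ie` are
    # placeholders, and the content summary is assumed to come from
    # Tree.path_content_summary():
    #
    #   delta, version_recorded, fs_hash = builder.record_entry_contents(
    #       ie, parent_invs, path, work_tree,
    #       work_tree.path_content_summary(path))
    #   if version_recorded:
    #       # a new version of this entry was recorded for the commit
    #       ...
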
    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the path's
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if ie.file_id in basis_inv:
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r).", (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if ie.file_id in inv:
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                if (# if the file length changed we have to store:
                    parent_entry.text_size != content_summary[1] or
                    # if the exec bit has changed we have to store:
                    parent_entry.executable != content_summary[2]):
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                lines = file_obj.readlines()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, lines, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            lines = []
            self._add_text_to_weave(ie.file_id, lines, heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint

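    # record_iter_changes() below is a generator: it only does its work while
    # being iterated, and it yields (file_id, relpath, fs_hash) tuples intended
    # for tree._observed_sha1() (see its docstring). A minimal sketch, assuming
    # `builder`, `work_tree` and `basis_id` placeholders and an iter_changes
    # stream that has already had missing items filtered out:
    #
    #   changes = work_tree.iter_changes(work_tree.basis_tree())
    #   for file_id, relpath, fs_hash in builder.record_iter_changes(
    #           work_tree, basis_id, changes):
    #       work_tree._observed_sha1(file_id, relpath, fs_hash)
    #   builder.finish_inventory()
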
3775.2.30
by Robert Collins
Remove the basis_tree parameter to record_iter_changes. |
552 |
def record_iter_changes(self, tree, basis_revision_id, iter_changes, |
553 |
_entry_factory=entry_factory): |
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
554 |
"""Record a new tree via iter_changes. |
555 |
||
3775.2.9
by Robert Collins
CommitBuilder handles deletes via record_iter_entries. |
556 |
:param tree: The tree to obtain text contents from for changed objects.
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
557 |
:param basis_revision_id: The revision id of the tree the iter_changes
|
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
558 |
has been generated against. Currently assumed to be the same
|
559 |
as self.parents[0] - if it is not, errors may occur.
|
|
560 |
:param iter_changes: An iter_changes iterator with the changes to apply
|
|
4183.5.5
by Robert Collins
Enable record_iter_changes for cases where it can work. |
561 |
to basis_revision_id. The iterator must not include any items with
|
562 |
a current kind of None - missing items must be either filtered out
|
|
563 |
or errored-on beefore record_iter_changes sees the item.
|
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
564 |
:param _entry_factory: Private method to bind entry_factory locally for
|
565 |
performance.
|
|
4183.5.4
by Robert Collins
Turn record_iter_changes into a generator to emit file system hashes. |
566 |
:return: A generator of (file_id, relpath, fs_hash) tuples for use with
|
567 |
tree._observed_sha1.
|
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
568 |
"""
|
569 |
# Create an inventory delta based on deltas between all the parents and
|
|
570 |
# deltas between all the parent inventories. We use inventory delta's
|
|
571 |
# between the inventory objects because iter_changes masks
|
|
572 |
# last-changed-field only changes.
|
|
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
573 |
# Working data:
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
574 |
# file_id -> change map, change is fileid, paths, changed, versioneds,
|
575 |
# parents, names, kinds, executables
|
|
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
576 |
merged_ids = {} |
3775.2.32
by Robert Collins
Trivial review feedback. |
577 |
# {file_id -> revision_id -> inventory entry, for entries in parent
|
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
578 |
# trees that are not parents[0]
|
579 |
parent_entries = {} |
|
4183.5.5
by Robert Collins
Enable record_iter_changes for cases where it can work. |
580 |
ghost_basis = False |
581 |
try: |
|
582 |
revtrees = list(self.repository.revision_trees(self.parents)) |
|
583 |
except errors.NoSuchRevision: |
|
584 |
# one or more ghosts, slow path.
|
|
585 |
revtrees = [] |
|
586 |
for revision_id in self.parents: |
|
587 |
try: |
|
588 |
revtrees.append(self.repository.revision_tree(revision_id)) |
|
589 |
except errors.NoSuchRevision: |
|
590 |
if not revtrees: |
|
591 |
basis_revision_id = _mod_revision.NULL_REVISION |
|
592 |
ghost_basis = True |
|
593 |
revtrees.append(self.repository.revision_tree( |
|
594 |
_mod_revision.NULL_REVISION)) |
|
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
595 |
# The basis inventory from a repository
|
596 |
if revtrees: |
|
597 |
basis_inv = revtrees[0].inventory |
|
598 |
else: |
|
599 |
basis_inv = self.repository.revision_tree( |
|
600 |
_mod_revision.NULL_REVISION).inventory |
|
3775.2.32
by Robert Collins
Trivial review feedback. |
601 |
if len(self.parents) > 0: |
4183.5.5
by Robert Collins
Enable record_iter_changes for cases where it can work. |
602 |
if basis_revision_id != self.parents[0] and not ghost_basis: |
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
603 |
raise Exception( |
604 |
"arbitrary basis parents not yet supported with merges") |
|
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
605 |
for revtree in revtrees[1:]: |
3775.2.32
by Robert Collins
Trivial review feedback. |
606 |
for change in revtree.inventory._make_delta(basis_inv): |
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
607 |
if change[1] is None: |
3775.2.32
by Robert Collins
Trivial review feedback. |
608 |
# Not present in this parent.
|
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
609 |
continue
|
610 |
if change[2] not in merged_ids: |
|
611 |
if change[0] is not None: |
|
4183.5.9
by Robert Collins
Fix creating new revisions of files when merging. |
612 |
basis_entry = basis_inv[change[2]] |
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
613 |
merged_ids[change[2]] = [ |
4183.5.9
by Robert Collins
Fix creating new revisions of files when merging. |
614 |
# basis revid
|
615 |
basis_entry.revision, |
|
616 |
# new tree revid
|
|
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
617 |
change[3].revision] |
4183.5.9
by Robert Collins
Fix creating new revisions of files when merging. |
618 |
parent_entries[change[2]] = { |
619 |
# basis parent
|
|
620 |
basis_entry.revision:basis_entry, |
|
621 |
# this parent
|
|
622 |
change[3].revision:change[3], |
|
623 |
}
|
|
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
624 |
else: |
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
625 |
merged_ids[change[2]] = [change[3].revision] |
4183.5.9
by Robert Collins
Fix creating new revisions of files when merging. |
626 |
parent_entries[change[2]] = {change[3].revision:change[3]} |
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
627 |
else: |
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
628 |
merged_ids[change[2]].append(change[3].revision) |
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
629 |
parent_entries[change[2]][change[3].revision] = change[3] |
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
630 |
else: |
631 |
merged_ids = {} |
|
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
632 |
# Setup the changes from the tree:
|
3775.2.32
by Robert Collins
Trivial review feedback. |
633 |
# changes maps file_id -> (change, [parent revision_ids])
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
634 |
changes= {} |
635 |
for change in iter_changes: |
|
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
636 |
# This probably looks up in basis_inv way to much.
|
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
637 |
if change[1][0] is not None: |
638 |
head_candidate = [basis_inv[change[0]].revision] |
|
639 |
else: |
|
640 |
head_candidate = [] |
|
641 |
changes[change[0]] = change, merged_ids.get(change[0], |
|
642 |
head_candidate) |
|
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
643 |
unchanged_merged = set(merged_ids) - set(changes) |
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
644 |
# Extend the changes dict with synthetic changes to record merges of
|
645 |
# texts.
|
|
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
646 |
for file_id in unchanged_merged: |
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
647 |
# Record a merged version of these items that did not change vs the
|
648 |
# basis. This can be either identical parallel changes, or a revert
|
|
649 |
# of a specific file after a merge. The recorded content will be
|
|
650 |
# that of the current tree (which is the same as the basis), but
|
|
651 |
# the per-file graph will reflect a merge.
|
|
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
652 |
# NB:XXX: We are reconstructing path information we had, this
|
653 |
# should be preserved instead.
|
|
654 |
# inv delta change: (file_id, (path_in_source, path_in_target),
|
|
655 |
# changed_content, versioned, parent, name, kind,
|
|
656 |
# executable)
|
|
4183.5.5
by Robert Collins
Enable record_iter_changes for cases where it can work. |
657 |
try: |
658 |
basis_entry = basis_inv[file_id] |
|
659 |
except errors.NoSuchId: |
|
660 |
# a change from basis->some_parents but file_id isn't in basis
|
|
661 |
# so was new in the merge, which means it must have changed
|
|
662 |
# from basis -> current, and as it hasn't the add was reverted
|
|
663 |
# by the user. So we discard this change.
|
|
664 |
pass
|
|
665 |
else: |
|
666 |
change = (file_id, |
|
667 |
(basis_inv.id2path(file_id), tree.id2path(file_id)), |
|
668 |
False, (True, True), |
|
669 |
(basis_entry.parent_id, basis_entry.parent_id), |
|
670 |
(basis_entry.name, basis_entry.name), |
|
671 |
(basis_entry.kind, basis_entry.kind), |
|
672 |
(basis_entry.executable, basis_entry.executable)) |
|
673 |
changes[file_id] = (change, merged_ids[file_id]) |
|
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
674 |
# changes contains tuples with the change and a set of inventory
|
675 |
# candidates for the file.
|
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
676 |
# inv delta is:
|
677 |
# old_path, new_path, file_id, new_inventory_entry
|
|
3775.2.7
by Robert Collins
CommitBuilder handles no-change commits to roots properly with record_iter_changes. |
678 |
seen_root = False # Is the root in the basis delta? |
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
679 |
inv_delta = self._basis_delta |
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
680 |
modified_rev = self._new_revision_id |
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
681 |
for change, head_candidates in changes.values(): |
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
682 |
if change[3][1]: # versioned in target. |
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
683 |
# Several things may be happening here:
|
684 |
# We may have a fork in the per-file graph
|
|
685 |
# - record a change with the content from tree
|
|
686 |
# We may have a change against < all trees
|
|
687 |
# - carry over the tree that hasn't changed
|
|
688 |
# We may have a change against all trees
|
|
689 |
# - record the change with the content from tree
|
|
3775.2.11
by Robert Collins
CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes. |
690 |
kind = change[6][1] |
3775.2.12
by Robert Collins
CommitBuilder.record_iter_changes handles renamed files. |
691 |
file_id = change[0] |
692 |
entry = _entry_factory[kind](file_id, change[5][1], |
|
693 |
change[4][1]) |
|
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
694 |
head_set = self._heads(change[0], set(head_candidates)) |
695 |
heads = [] |
|
696 |
# Preserve ordering.
|
|
697 |
for head_candidate in head_candidates: |
|
698 |
if head_candidate in head_set: |
|
699 |
heads.append(head_candidate) |
|
700 |
head_set.remove(head_candidate) |
|
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
701 |
carried_over = False |
3775.2.33
by Robert Collins
Fix bug with merges of new files, increasing test coverage to ensure its kept fixed. |
702 |
if len(heads) == 1: |
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
703 |
# Could be a carry-over situation:
|
3775.2.34
by Robert Collins
Handle committing new files again. |
704 |
parent_entry_revs = parent_entries.get(file_id, None) |
705 |
if parent_entry_revs: |
|
706 |
parent_entry = parent_entry_revs.get(heads[0], None) |
|
707 |
else: |
|
708 |
parent_entry = None |
|
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
709 |
if parent_entry is None: |
710 |
# The parent iter_changes was called against is the one
|
|
711 |
# that is the per-file head, so any change is relevant
|
|
712 |
# iter_changes is valid.
|
|
713 |
carry_over_possible = False |
|
714 |
else: |
|
715 |
# could be a carry over situation
|
|
716 |
# A change against the basis may just indicate a merge,
|
|
717 |
# we need to check the content against the source of the
|
|
718 |
# merge to determine if it was changed after the merge
|
|
719 |
# or carried over.
|
|
3775.2.23
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch files. |
720 |
if (parent_entry.kind != entry.kind or |
721 |
parent_entry.parent_id != entry.parent_id or |
|
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
722 |
parent_entry.name != entry.name): |
723 |
# Metadata common to all entries has changed
|
|
724 |
# against per-file parent
|
|
725 |
carry_over_possible = False |
|
726 |
else: |
|
727 |
carry_over_possible = True |
|
728 |
# per-type checks for changes against the parent_entry
|
|
729 |
# are done below.
|
|
730 |
else: |
|
731 |
# Cannot be a carry-over situation
|
|
732 |
carry_over_possible = False |
|
733 |
# Populate the entry in the delta
|
|
734 |
if kind == 'file': |
|
3775.2.23
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch files. |
735 |
# XXX: There is still a small race here: If someone reverts the content of a file
|
736 |
# after iter_changes examines and decides it has changed,
|
|
737 |
# we will unconditionally record a new version even if some
|
|
738 |
# other process reverts it while commit is running (with
|
|
739 |
# the revert happening after iter_changes did it's
|
|
740 |
# examination).
|
|
741 |
if change[7][1]: |
|
742 |
entry.executable = True |
|
743 |
else: |
|
744 |
entry.executable = False |
|
745 |
if (carry_over_possible and |
|
746 |
parent_entry.executable == entry.executable): |
|
747 |
# Check the file length, content hash after reading
|
|
748 |
# the file.
|
|
749 |
nostore_sha = parent_entry.text_sha1 |
|
750 |
else: |
|
751 |
nostore_sha = None |
|
752 |
file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1]) |
|
753 |
try: |
|
754 |
lines = file_obj.readlines() |
|
755 |
finally: |
|
756 |
file_obj.close() |
|
757 |
try: |
|
758 |
entry.text_sha1, entry.text_size = self._add_text_to_weave( |
|
759 |
file_id, lines, heads, nostore_sha) |
|
4183.5.4
by Robert Collins
Turn record_iter_changes into a generator to emit file system hashes. |
760 |
yield file_id, change[1][1], (entry.text_sha1, stat_value) |
3775.2.23
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch files. |
761 |
except errors.ExistingContent: |
762 |
# No content change against a carry_over parent
|
|
4183.5.4
by Robert Collins
Turn record_iter_changes into a generator to emit file system hashes. |
763 |
# Perhaps this should also yield a fs hash update?
|
3775.2.23
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch files. |
764 |
carried_over = True |
765 |
entry.text_size = parent_entry.text_size |
|
766 |
entry.text_sha1 = parent_entry.text_sha1 |
|
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
767 |
elif kind == 'symlink': |
3775.2.24
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch symlinks. |
768 |
# Wants a path hint?
|
769 |
entry.symlink_target = tree.get_symlink_target(file_id) |
|
770 |
if (carry_over_possible and |
|
771 |
parent_entry.symlink_target == entry.symlink_target): |
|
4183.5.2
by Robert Collins
Support tree-reference in record_iter_changes. |
772 |
carried_over = True |
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
773 |
else: |
3775.2.13
by Robert Collins
CommitBuilder.record_iter_changes handles renamed symlinks. |
774 |
self._add_text_to_weave(change[0], [], heads, None) |
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
775 |
elif kind == 'directory': |
776 |
if carry_over_possible: |
|
777 |
carried_over = True |
|
778 |
else: |
|
3775.2.19
by Robert Collins
CommitBuilder.record_iter_changes handles merged directories. |
779 |
# Nothing to set on the entry.
|
780 |
# XXX: split into the Root and nonRoot versions.
|
|
781 |
if change[1][1] != '' or self.repository.supports_rich_root(): |
|
782 |
self._add_text_to_weave(change[0], [], heads, None) |
|
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
783 |
elif kind == 'tree-reference': |
4183.5.2
by Robert Collins
Support tree-reference in record_iter_changes. |
784 |
if not self.repository._format.supports_tree_reference: |
785 |
# This isn't quite sane as an error, but we shouldn't
|
|
786 |
# ever see this code path in practice: trees don't
|
|
787 |
# permit references when the repo doesn't support tree
|
|
788 |
# references.
|
|
789 |
raise errors.UnsupportedOperation(tree.add_reference, |
|
790 |
self.repository) |
|
791 |
entry.reference_revision = \ |
|
792 |
tree.get_reference_revision(change[0]) |
|
793 |
if (carry_over_possible and |
|
794 |
parent_entry.reference_revision == entry.reference_revision): |
|
795 |
carried_over = True |
|
796 |
else: |
|
797 |
self._add_text_to_weave(change[0], [], heads, None) |
|
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
798 |
else: |
3775.2.27
by Robert Collins
CommitBuilder.record_iter_changes handles files becoming directories and links. |
799 |
raise AssertionError('unknown kind %r' % kind) |
3775.2.22
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch directories. |
800 |
if not carried_over: |
801 |
entry.revision = modified_rev |
|
3775.2.23
by Robert Collins
CommitBuilder.record_iter_changes handles changed-in-branch files. |
802 |
else: |
803 |
entry.revision = parent_entry.revision |
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
804 |
else: |
805 |
entry = None |
|
3775.2.7
by Robert Collins
CommitBuilder handles no-change commits to roots properly with record_iter_changes. |
806 |
new_path = change[1][1] |
807 |
inv_delta.append((change[1][0], new_path, change[0], entry)) |
|
808 |
if new_path == '': |
|
809 |
seen_root = True |
|
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
810 |
self.new_inventory = None |
3775.2.7
by Robert Collins
CommitBuilder handles no-change commits to roots properly with record_iter_changes. |
811 |
if len(inv_delta): |
3775.2.9
by Robert Collins
CommitBuilder handles deletes via record_iter_entries. |
812 |
self._any_changes = True |
3775.2.7
by Robert Collins
CommitBuilder handles no-change commits to roots properly with record_iter_changes. |
813 |
if not seen_root: |
814 |
# housekeeping root entry changes do not affect no-change commits.
|
|
3775.2.9
by Robert Collins
CommitBuilder handles deletes via record_iter_entries. |
815 |
self._require_root_change(tree) |
3775.2.29
by Robert Collins
Updates to the form of add_inventory_by_delta that landed in trunk. |
816 |
self.basis_delta_revision = basis_revision_id |
3775.2.4
by Robert Collins
Start on a CommitBuilder.record_iter_changes method. |
817 |
|
2592.3.135
by Robert Collins
Do not create many transient knit objects, saving 4% on commit. |
818 |
def _add_text_to_weave(self, file_id, new_lines, parents, nostore_sha): |
2592.3.136
by Robert Collins
Merge bzr.dev. |
819 |
# Note: as we read the content directly from the tree, we know it's not
|
820 |
# been turned into unicode or badly split - but a broken tree
|
|
821 |
# implementation could give us bad output from readlines() so this is
|
|
822 |
# not a guarantee of safety. What would be better is always checking
|
|
823 |
# the content during test suite execution. RBC 20070912
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
824 |
parent_keys = tuple((file_id, parent) for parent in parents) |
825 |
return self.repository.texts.add_lines( |
|
826 |
(file_id, self._new_revision_id), parent_keys, new_lines, |
|
3316.2.12
by Robert Collins
Catch some extra deprecated calls. |
827 |
nostore_sha=nostore_sha, random_id=self.random_revid, |
828 |
check_content=False)[0:2] |
|
2592.3.135
by Robert Collins
Do not create many transient knit objects, saving 4% on commit. |
829 |
|
830 |
||
831 |
class RootCommitBuilder(CommitBuilder): |
|
832 |
"""This commitbuilder actually records the root id""" |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
833 |
|
2825.5.2
by Robert Collins
Review feedback, and fix pointless commits with nested trees to raise PointlessCommit appropriately. |
834 |
# the root entry gets versioned properly by this builder.
|
2840.1.1
by Ian Clatworthy
faster pointless commit detection (Robert Collins) |
835 |
_versioned_root = True |
2825.5.2
by Robert Collins
Review feedback, and fix pointless commits with nested trees to raise PointlessCommit appropriately. |
836 |
|
2592.3.135
by Robert Collins
Do not create many transient knit objects, saving 4% on commit. |
837 |
def _check_root(self, ie, parent_invs, tree): |
838 |
"""Helper for record_entry_contents. |
|
839 |
||
840 |
:param ie: An entry being added.
|
|
841 |
:param parent_invs: The inventories of the parent revisions of the
|
|
842 |
commit.
|
|
843 |
:param tree: The tree that is being committed.
|
|
844 |
"""
|
|
845 |
||
3775.2.9
by Robert Collins
CommitBuilder handles deletes via record_iter_entries. |
846 |
def _require_root_change(self, tree): |
3775.2.7
by Robert Collins
CommitBuilder handles no-change commits to roots properly with record_iter_changes. |
847 |
"""Enforce an appropriate root object change. |
848 |
||
849 |
This is called once when record_iter_changes is called, if and only if
|
|
850 |
the root was not in the delta calculated by record_iter_changes.
|
|
3775.2.9
by Robert Collins
CommitBuilder handles deletes via record_iter_entries. |
851 |
|
852 |
:param tree: The tree which is being committed.
|
|
3775.2.7
by Robert Collins
CommitBuilder handles no-change commits to roots properly with record_iter_changes. |
853 |
"""
|
854 |
# versioned roots do not change unless the tree found a change.
|
|
855 |
||
2592.3.135
by Robert Collins
Do not create many transient knit objects, saving 4% on commit. |
856 |
|
2220.2.3
by Martin Pool
Add tag: revision namespace. |
857 |
######################################################################
|
858 |
# Repositories
|
|
859 |
||
1185.66.5
by Aaron Bentley
Renamed RevisionStorage to Repository |
860 |
class Repository(object): |
1185.70.3
by Martin Pool
Various updates to make storage branch mergeable: |
861 |
"""Repository holding history for one or more branches. |
862 |
||
863 |
The repository holds and retrieves historical information including
|
|
864 |
revisions and file history. It's normally accessed only by the Branch,
|
|
865 |
which views a particular line of development through that history.
|
|
866 |
||
3350.6.7
by Robert Collins
Review feedback, making things more clear, adding documentation on what is used where. |
867 |
The Repository builds on top of some byte storage facilities (the revisions,
|
868 |
signatures, inventories and texts attributes) and a Transport, which
|
|
869 |
respectively provide byte storage and a means to access the (possibly
|
|
1185.70.3
by Martin Pool
Various updates to make storage branch mergeable: |
870 |
remote) disk.
|
3407.2.13
by Martin Pool
Remove indirection through control_files to get transports |
871 |
|
3350.6.7
by Robert Collins
Review feedback, making things more clear, adding documentation on what is used where. |
872 |
The byte storage facilities are addressed via tuples, which we refer to
|
873 |
as 'keys' throughout the code base. Revision_keys, inventory_keys and
|
|
874 |
signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
|
|
875 |
(file_id, revision_id). We use this interface because it allows low
|
|
876 |
friction with the underlying code that implements disk indices, network
|
|
877 |
encoding and other parts of bzrlib.
|
|
878 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
879 |
:ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
|
880 |
the serialised revisions for the repository. This can be used to obtain
|
|
881 |
revision graph information or to access raw serialised revisions.
|
|
882 |
The result of trying to insert data into the repository via this store
|
|
883 |
is undefined: it should be considered read-only except for implementors
|
|
884 |
of repositories.
|
|
3350.6.7
by Robert Collins
Review feedback, making things more clear, adding documentation on what is used where. |
885 |
:ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
|
886 |
the serialised signatures for the repository. This can be used to
|
|
887 |
obtain access to raw serialised signatures. The result of trying to
|
|
888 |
insert data into the repository via this store is undefined: it should
|
|
889 |
be considered read-only except for implementors of repositories.
|
|
890 |
:ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
|
|
891 |
the serialised inventories for the repository. This can be used to
|
|
892 |
obtain unserialised inventories. The result of trying to insert data
|
|
893 |
into the repository via this store is undefined: it should be
|
|
894 |
considered read-only except for implementors of repositories.
|
|
895 |
:ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
|
|
896 |
texts of files and directories for the repository. This can be used to
|
|
897 |
obtain file texts or file graphs. Note that Repository.iter_file_bytes
|
|
898 |
is usually a better interface for accessing file texts.
|
|
899 |
The result of trying to insert data into the repository via this store
|
|
900 |
is undefined: it should be considered read-only except for implementors
|
|
901 |
of repositories.
|
|
3407.2.13
by Martin Pool
Remove indirection through control_files to get transports |
902 |
:ivar _transport: Transport for file access to repository, typically
|
903 |
pointing to .bzr/repository.
|
|
1185.70.3
by Martin Pool
Various updates to make storage branch mergeable: |
904 |
"""
|
1185.65.17
by Robert Collins
Merge from integration, mode-changes are broken. |
905 |
|
2592.3.135
by Robert Collins
Do not create many transient knit objects, saving 4% on commit. |
906 |
# What class to use for a CommitBuilder. Often it's simpler to change this
|
907 |
# in a Repository class subclass rather than to override
|
|
908 |
# get_commit_builder.
|
|
909 |
_commit_builder_class = CommitBuilder |
|
910 |
# The search regex used by xml based repositories to determine what things
|
|
911 |
# were changed in a single commit.
|
|
2163.2.1
by John Arbash Meinel
Speed up the fileids_altered_by_revision_ids processing |
912 |
_file_ids_altered_regex = lazy_regex.lazy_compile( |
913 |
r'file_id="(?P<file_id>[^"]+)"' |
|
2776.4.6
by Robert Collins
Fixup various commit test failures falling out from the other commit changes. |
914 |
r'.* revision="(?P<revision_id>[^"]+)"' |
2163.2.1
by John Arbash Meinel
Speed up the fileids_altered_by_revision_ids processing |
915 |
)
|
916 |
||
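The key addressing described in the Repository docstring above is easiest to see in use: revisions, inventories and signatures are keyed by 1-tuples (revision_id,), texts by 2-tuples (file_id, revision_id). A minimal sketch of querying those stores on an existing repository; the path and ids are placeholders, and Repository.open, lock_read, unlock, get_parent_map and get_sha1s are the calls defined or used later in this file.

from bzrlib.repository import Repository

repo = Repository.open('path-to-some-repo')        # placeholder location
repo.lock_read()
try:
    rev_key = ('some-revision-id',)                  # 1-tuple key
    text_key = ('some-file-id', 'some-revision-id')  # 2-tuple key
    # revision graph information, keyed by (revision_id,)
    parent_map = repo.revisions.get_parent_map([rev_key])
    # sha1 of a stored file text, keyed by (file_id, revision_id)
    sha1s = repo.texts.get_sha1s([text_key])
finally:
    repo.unlock()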
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
917 |
def abort_write_group(self, suppress_errors=False): |
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
918 |
"""Commit the contents accrued within the current write group. |
919 |
||
3825.4.6
by Andrew Bennetts
Document the suppress_errors flag in the docstring. |
920 |
:param suppress_errors: if true, abort_write_group will catch and log
|
921 |
unexpected errors that happen during the abort, rather than
|
|
922 |
allowing them to propagate. Defaults to False.
|
|
923 |
||
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
924 |
:seealso: start_write_group.
|
925 |
"""
|
|
926 |
if self._write_group is not self.get_transaction(): |
|
927 |
# has an unlock or relock occurred?
|
|
928 |
raise errors.BzrError('mismatched lock context and write group.') |
|
3825.4.1
by Andrew Bennetts
Add suppress_errors to abort_write_group. |
929 |
try: |
930 |
self._abort_write_group() |
|
931 |
except Exception, exc: |
|
932 |
self._write_group = None |
|
933 |
if not suppress_errors: |
|
934 |
raise
|
|
935 |
mutter('abort_write_group failed') |
|
936 |
log_exception_quietly() |
|
937 |
note('bzr: ERROR (ignored): %s', exc) |
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
938 |
self._write_group = None |
939 |
||
940 |
def _abort_write_group(self): |
|
941 |
"""Template method for per-repository write group cleanup. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
942 |
|
943 |
This is called during abort before the write group is considered to be
|
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
944 |
finished and should clean up any internal state accrued during the write
|
945 |
group. There is no requirement that data handed to the repository be
|
|
946 |
*not* made available - this is not a rollback - but neither should any
|
|
947 |
attempt be made to ensure that data added is fully committed. Abort is
|
|
948 |
invoked when an error has occurred, so further disk or network operations
|
|
949 |
may not be possible or may error and if possible should not be
|
|
950 |
attempted.
|
|
951 |
"""
|
|
952 |
||
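abort_write_group and its _abort_write_group template above are one half of the write group lifecycle; lock_write, start_write_group and commit_write_group (all defined further down in this class) are the other half. A sketch of the usual calling pattern, assuming `repo` is an already-open Repository and `insert_data` stands in for whatever populates the write group:

token = repo.lock_write()
try:
    repo.start_write_group()
    try:
        insert_data(repo)   # placeholder: add_inventory / add_revision etc.
    except:
        # discard whatever was accrued; log rather than mask the original error
        repo.abort_write_group(suppress_errors=True)
        raise
    else:
        repo.commit_write_group()
finally:
    repo.unlock()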
3221.12.1
by Robert Collins
Backport development1 format (stackable packs) to before-shallow-branches. |
953 |
def add_fallback_repository(self, repository): |
954 |
"""Add a repository to use for looking up data not held locally. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
955 |
|
3221.12.1
by Robert Collins
Backport development1 format (stackable packs) to before-shallow-branches. |
956 |
:param repository: A repository.
|
957 |
"""
|
|
958 |
if not self._format.supports_external_lookups: |
|
959 |
raise errors.UnstackableRepositoryFormat(self._format, self.base) |
|
3582.1.7
by Martin Pool
add_fallback_repository gives more detail on incompatibilities |
960 |
self._check_fallback_repository(repository) |
3221.12.1
by Robert Collins
Backport development1 format (stackable packs) to before-shallow-branches. |
961 |
self._fallback_repositories.append(repository) |
3221.12.13
by Robert Collins
Implement generic stacking rather than pack-internals based stacking. |
962 |
self.texts.add_fallback_versioned_files(repository.texts) |
963 |
self.inventories.add_fallback_versioned_files(repository.inventories) |
|
964 |
self.revisions.add_fallback_versioned_files(repository.revisions) |
|
965 |
self.signatures.add_fallback_versioned_files(repository.signatures) |
|
3221.12.1
by Robert Collins
Backport development1 format (stackable packs) to before-shallow-branches. |
966 |
|
3582.1.7
by Martin Pool
add_fallback_repository gives more detail on incompatibilities |
967 |
def _check_fallback_repository(self, repository): |
3221.12.4
by Robert Collins
Implement basic repository supporting external references. |
968 |
"""Check that this repository can fallback to repository safely. |
3582.1.7
by Martin Pool
add_fallback_repository gives more detail on incompatibilities |
969 |
|
970 |
Raise an error if not.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
971 |
|
3221.12.4
by Robert Collins
Implement basic repository supporting external references. |
972 |
:param repository: A repository to fallback to.
|
973 |
"""
|
|
3582.1.7
by Martin Pool
add_fallback_repository gives more detail on incompatibilities |
974 |
return InterRepository._assert_same_model(self, repository) |
3221.12.4
by Robert Collins
Implement basic repository supporting external references. |
975 |
|
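add_fallback_repository above simply plugs another repository's revisions, inventories, signatures and texts stores in as fallbacks, after checking that this format supports external lookups and that both repositories share a model. A sketch, with both paths assumed to name existing repositories of a stacking-capable format:

from bzrlib.repository import Repository

stacked = Repository.open('path-to-stacked-repo')   # placeholder locations
base = Repository.open('path-to-base-repo')
# raises UnstackableRepositoryFormat if the format cannot stack, or an
# incompatibility error if the two repositories do not share a model
stacked.add_fallback_repository(base)
# lookups that miss in 'stacked' may now be satisfied from 'base'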
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
976 |
def add_inventory(self, revision_id, inv, parents): |
977 |
"""Add the inventory inv to the repository as revision_id. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
978 |
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
979 |
:param parents: The revision ids of the parents that revision_id
|
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
980 |
is known to have and are in the repository already.
|
981 |
||
3169.2.1
by Robert Collins
New method ``iter_inventories`` on Repository for access to many |
982 |
:returns: The validator (which is a sha1 digest, though what is sha'd is
|
983 |
repository format specific) of the serialized inventory.
|
|
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
984 |
"""
|
3376.2.4
by Martin Pool
Remove every assert statement from bzrlib! |
985 |
if not self.is_in_write_group(): |
986 |
raise AssertionError("%r not in write group" % (self,)) |
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
987 |
_mod_revision.check_not_reserved_id(revision_id) |
3376.2.4
by Martin Pool
Remove every assert statement from bzrlib! |
988 |
if not (inv.revision_id is None or inv.revision_id == revision_id): |
989 |
raise AssertionError( |
|
990 |
"Mismatch between inventory revision"
|
|
991 |
" id and insertion revid (%r, %r)" |
|
992 |
% (inv.revision_id, revision_id)) |
|
993 |
if inv.root is None: |
|
994 |
raise AssertionError() |
|
2817.2.1
by Robert Collins
* Inventory serialisation no longer double-sha's the content. |
995 |
inv_lines = self._serialise_inventory_to_lines(inv) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
996 |
return self._inventory_add_lines(revision_id, parents, |
2817.2.1
by Robert Collins
* Inventory serialisation no longer double-sha's the content. |
997 |
inv_lines, check_content=False) |
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
998 |
|
3879.2.2
by John Arbash Meinel
Rename add_inventory_delta to add_inventory_by_delta. |
999 |
def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id, |
1000 |
parents): |
|
3775.2.1
by Robert Collins
Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas. |
1001 |
"""Add a new inventory expressed as a delta against another revision. |
3879.2.2
by John Arbash Meinel
Rename add_inventory_delta to add_inventory_by_delta. |
1002 |
|
3775.2.1
by Robert Collins
Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas. |
1003 |
:param basis_revision_id: The inventory id the delta was created
|
3879.2.2
by John Arbash Meinel
Rename add_inventory_delta to add_inventory_by_delta. |
1004 |
against. (This does not have to be a direct parent.)
|
3775.2.1
by Robert Collins
Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas. |
1005 |
:param delta: The inventory delta (see Inventory.apply_delta for
|
1006 |
details).
|
|
1007 |
:param new_revision_id: The revision id that the inventory is being
|
|
1008 |
added for.
|
|
1009 |
:param parents: The revision ids of the parents that revision_id is
|
|
1010 |
known to have and are in the repository already. These are supplied
|
|
1011 |
for repositories that depend on the inventory graph for revision
|
|
1012 |
graph access, as well as for those that pun ancestry with delta
|
|
1013 |
compression.
|
|
1014 |
||
3879.3.1
by John Arbash Meinel
Change the return of add_inventory_by_delta to also return the Inventory. |
1015 |
:returns: (validator, new_inv)
|
1016 |
The validator (which is a sha1 digest, though what is sha'd is
|
|
1017 |
repository format specific) of the serialized inventory, and the
|
|
1018 |
resulting inventory.
|
|
3775.2.1
by Robert Collins
Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas. |
1019 |
"""
|
1020 |
if not self.is_in_write_group(): |
|
1021 |
raise AssertionError("%r not in write group" % (self,)) |
|
1022 |
_mod_revision.check_not_reserved_id(new_revision_id) |
|
1023 |
basis_tree = self.revision_tree(basis_revision_id) |
|
1024 |
basis_tree.lock_read() |
|
1025 |
try: |
|
1026 |
# Note that this mutates the inventory of basis_tree, which not all
|
|
1027 |
# inventory implementations may support: A better idiom would be to
|
|
1028 |
# return a new inventory, but as there is no revision tree cache in
|
|
1029 |
# repository this is safe for now - RBC 20081013
|
|
1030 |
basis_inv = basis_tree.inventory |
|
1031 |
basis_inv.apply_delta(delta) |
|
1032 |
basis_inv.revision_id = new_revision_id |
|
3879.3.1
by John Arbash Meinel
Change the return of add_inventory_by_delta to also return the Inventory. |
1033 |
return (self.add_inventory(new_revision_id, basis_inv, parents), |
1034 |
basis_inv) |
|
3775.2.1
by Robert Collins
Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas. |
1035 |
finally: |
1036 |
basis_tree.unlock() |
|
1037 |
||
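add_inventory_by_delta consumes the same (old_path, new_path, file_id, entry) 4-tuples that record_iter_changes accumulates into inv_delta earlier in this file. A sketch of adding one new directory against an existing basis revision; it assumes `repo` is an open, write-lockable Repository, that 'rev-1-id' is already present, and that 'tree-root-id' is the root file id of that basis inventory:

from bzrlib.inventory import InventoryDirectory

# (old_path, new_path, file_id, entry) - old_path None means "newly added"
subdir = InventoryDirectory('subdir-id', 'subdir', 'tree-root-id')
subdir.revision = 'rev-2-id'
delta = [(None, 'subdir', 'subdir-id', subdir)]

repo.lock_write()
try:
    repo.start_write_group()
    validator, new_inv = repo.add_inventory_by_delta(
        'rev-1-id', delta, 'rev-2-id', ['rev-1-id'])
    repo.commit_write_group()
finally:
    repo.unlock()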
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1038 |
def _inventory_add_lines(self, revision_id, parents, lines, |
2805.6.7
by Robert Collins
Review feedback. |
1039 |
check_content=True): |
2817.2.1
by Robert Collins
* Inventory serialisation no longer double-sha's the content. |
1040 |
"""Store lines in inv_vf and return the sha1 of the inventory.""" |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1041 |
parents = [(parent,) for parent in parents] |
1042 |
return self.inventories.add_lines((revision_id,), parents, lines, |
|
2817.2.1
by Robert Collins
* Inventory serialisation no longer double-sha's the content. |
1043 |
check_content=check_content)[0] |
1740.3.6
by Jelmer Vernooij
Move inventory writing to the commit builder. |
1044 |
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
1045 |
def add_revision(self, revision_id, rev, inv=None, config=None): |
1046 |
"""Add rev to the revision store as revision_id. |
|
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
1047 |
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
1048 |
:param revision_id: the revision id to use.
|
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
1049 |
:param rev: The revision object.
|
1050 |
:param inv: The inventory for the revision. If None, it will be looked
|
|
1051 |
up in the inventory store.
|
|
1052 |
:param config: If None no digital signature will be created.
|
|
1053 |
If supplied its signature_needed method will be used
|
|
1054 |
to determine if a signature should be made.
|
|
1055 |
"""
|
|
2249.5.13
by John Arbash Meinel
Finish auditing Repository, and fix generate_ids to always generate utf8 ids. |
1056 |
# TODO: jam 20070210 Shouldn't we check rev.revision_id and
|
1057 |
# rev.parent_ids?
|
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
1058 |
_mod_revision.check_not_reserved_id(revision_id) |
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
1059 |
if config is not None and config.signature_needed(): |
1060 |
if inv is None: |
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
1061 |
inv = self.get_inventory(revision_id) |
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
1062 |
plaintext = Testament(rev, inv).as_short_text() |
1063 |
self.store_revision_signature( |
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
1064 |
gpg.GPGStrategy(config), plaintext, revision_id) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1065 |
# check inventory present
|
1066 |
if not self.inventories.get_parent_map([(revision_id,)]): |
|
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
1067 |
if inv is None: |
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
1068 |
raise errors.WeaveRevisionNotPresent(revision_id, |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1069 |
self.inventories) |
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
1070 |
else: |
1071 |
# yes, this is not suitable for adding with ghosts.
|
|
3380.1.6
by Aaron Bentley
Ensure fetching munges sha1s |
1072 |
rev.inventory_sha1 = self.add_inventory(revision_id, inv, |
3305.1.1
by Jelmer Vernooij
Make sure that specifying the inv= argument to add_revision() sets the |
1073 |
rev.parent_ids) |
3380.1.6
by Aaron Bentley
Ensure fetching munges sha1s |
1074 |
else: |
3350.8.3
by Robert Collins
VF.get_sha1s needed changing to be stackable. |
1075 |
key = (revision_id,) |
1076 |
rev.inventory_sha1 = self.inventories.get_sha1s([key])[key] |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1077 |
self._add_revision(rev) |
1570.1.2
by Robert Collins
Import bzrtools' 'fix' command as 'bzr reconcile.' |
1078 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1079 |
def _add_revision(self, revision): |
1080 |
text = self._serializer.write_revision_to_string(revision) |
|
1081 |
key = (revision.revision_id,) |
|
1082 |
parents = tuple((parent,) for parent in revision.parent_ids) |
|
1083 |
self.revisions.add_lines(key, parents, osutils.split_lines(text)) |
|
2520.4.10
by Aaron Bentley
Enable installation of revisions |
1084 |
|
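add_revision (together with the _add_revision serializer above) is the low-level way to store a revision object; normally get_commit_builder further down is used instead. A sketch under heavy assumptions: `repo` is write-locked and inside a write group, `inv` is a complete Inventory whose revision_id is 'rev-2-id' and whose texts are already stored, and the Revision keyword arguments shown are simply set as attributes on the object:

from bzrlib.revision import Revision

rev = Revision('rev-2-id',
               committer='Jane Doe <jane@example.com>',
               message='an example commit',
               timestamp=1234567890.0, timezone=0,
               parent_ids=['rev-1-id'])
# inv is not yet in repo.inventories, so add_revision adds it and fills
# in rev.inventory_sha1 before serialising the revision itself.
repo.add_revision('rev-2-id', rev, inv=inv)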
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
1085 |
def all_revision_ids(self): |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1086 |
"""Returns a list of all the revision ids in the repository. |
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
1087 |
|
3221.12.1
by Robert Collins
Backport development1 format (stackable packs) to before-shallow-branches. |
1088 |
This is conceptually deprecated because code should generally work on
|
1089 |
the graph reachable from a particular revision, and ignore any other
|
|
1090 |
revisions that might be present. There is no direct replacement
|
|
1091 |
method.
|
|
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
1092 |
"""
|
2592.3.114
by Robert Collins
More evil mutterings. |
1093 |
if 'evil' in debug.debug_flags: |
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
1094 |
mutter_callsite(2, "all_revision_ids is linear with history.") |
3221.12.4
by Robert Collins
Implement basic repository supporting external references. |
1095 |
return self._all_revision_ids() |
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
1096 |
|
1097 |
def _all_revision_ids(self): |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1098 |
"""Returns a list of all the revision ids in the repository. |
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1099 |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1100 |
These are in as much topological order as the underlying store can
|
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
1101 |
present.
|
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1102 |
"""
|
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
1103 |
raise NotImplementedError(self._all_revision_ids) |
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1104 |
|
1687.1.7
by Robert Collins
Teach Repository about break_lock. |
1105 |
def break_lock(self): |
1106 |
"""Break a lock if one is present from another instance. |
|
1107 |
||
1108 |
Uses the ui factory to ask for confirmation if the lock may be from
|
|
1109 |
an active process.
|
|
1110 |
"""
|
|
1111 |
self.control_files.break_lock() |
|
1112 |
||
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1113 |
@needs_read_lock
|
1114 |
def _eliminate_revisions_not_present(self, revision_ids): |
|
1115 |
"""Check every revision id in revision_ids to see if we have it. |
|
1116 |
||
1117 |
Returns a set of the present revisions.
|
|
1118 |
"""
|
|
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
1119 |
result = [] |
3369.2.1
by John Arbash Meinel
Knit => knit fetching also has some very bad 'for x in revision_ids: has_revision_id()' calls |
1120 |
graph = self.get_graph() |
1121 |
parent_map = graph.get_parent_map(revision_ids) |
|
1122 |
# The old API returned a list, should this actually be a set?
|
|
1123 |
return parent_map.keys() |
|
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
1124 |
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
1125 |
@staticmethod
|
1126 |
def create(a_bzrdir): |
|
1127 |
"""Construct the current default format repository in a_bzrdir.""" |
|
1128 |
return RepositoryFormat.get_default_format().initialize(a_bzrdir) |
|
1129 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1130 |
def __init__(self, _format, a_bzrdir, control_files): |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
1131 |
"""instantiate a Repository. |
1132 |
||
1133 |
:param _format: The format of the repository on disk.
|
|
1134 |
:param a_bzrdir: The BzrDir of the repository.
|
|
1135 |
||
1136 |
In the future we will have a single api for all stores for
|
|
1137 |
getting file texts, inventories and revisions, then
|
|
1138 |
this construct will accept instances of those things.
|
|
1139 |
"""
|
|
1608.2.1
by Martin Pool
[merge] Storage filename escaping |
1140 |
super(Repository, self).__init__() |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
1141 |
self._format = _format |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
1142 |
# the following are part of the public API for Repository:
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
1143 |
self.bzrdir = a_bzrdir |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
1144 |
self.control_files = control_files |
3407.2.13
by Martin Pool
Remove indirection through control_files to get transports |
1145 |
self._transport = control_files._transport |
3407.2.14
by Martin Pool
Remove more cases of getting transport via control_files |
1146 |
self.base = self._transport.base |
2671.4.2
by Robert Collins
Review feedback. |
1147 |
# for tests
|
1148 |
self._reconcile_does_inventory_gc = True |
|
2745.6.16
by Aaron Bentley
Update from review |
1149 |
self._reconcile_fixes_text_parents = False |
2951.1.3
by Robert Collins
Partial support for native reconcile with packs. |
1150 |
self._reconcile_backsup_inventory = True |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1151 |
# not right yet - should be more semantically clear ?
|
1152 |
#
|
|
1608.2.1
by Martin Pool
[merge] Storage filename escaping |
1153 |
# TODO: make sure to construct the right store classes, etc, depending
|
1154 |
# on whether escaping is required.
|
|
1904.2.3
by Martin Pool
Give a warning on access to old repository formats |
1155 |
self._warn_if_deprecated() |
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1156 |
self._write_group = None |
3221.12.1
by Robert Collins
Backport development1 format (stackable packs) to before-shallow-branches. |
1157 |
# Additional places to query for data.
|
1158 |
self._fallback_repositories = [] |
|
3882.6.23
by John Arbash Meinel
Change the XMLSerializer.read_inventory_from_string api. |
1159 |
# An InventoryEntry cache, used during deserialization
|
1160 |
self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024) |
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1161 |
|
1668.1.3
by Martin Pool
[patch] use the correct transaction when committing snapshot (Malone: #43959) |
1162 |
def __repr__(self): |
2592.4.5
by Martin Pool
Add Repository.base on all repositories. |
1163 |
return '%s(%r)' % (self.__class__.__name__, |
1164 |
self.base) |
|
1668.1.3
by Martin Pool
[patch] use the correct transaction when committing snapshot (Malone: #43959) |
1165 |
|
2671.1.4
by Andrew Bennetts
Rename is_same_repository to has_same_location, thanks Aaron! |
1166 |
def has_same_location(self, other): |
2671.1.3
by Andrew Bennetts
Remove Repository.__eq__/__ne__ methods, replace with is_same_repository method. |
1167 |
"""Returns a boolean indicating if this repository is at the same |
1168 |
location as another repository.
|
|
1169 |
||
1170 |
This might return False even when two repository objects are accessing
|
|
1171 |
the same physical repository via different URLs.
|
|
1172 |
"""
|
|
2592.3.162
by Robert Collins
Remove some arbitrary differences from bzr.dev. |
1173 |
if self.__class__ is not other.__class__: |
1174 |
return False |
|
3407.2.3
by Martin Pool
Branch and Repository use their own ._transport rather than going through .control_files |
1175 |
return (self._transport.base == other._transport.base) |
2671.1.1
by Andrew Bennetts
Add support for comparing Repositories with == and != operators. |
1176 |
|
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1177 |
def is_in_write_group(self): |
1178 |
"""Return True if there is an open write group. |
|
1179 |
||
1180 |
:seealso: start_write_group.
|
|
1181 |
"""
|
|
1182 |
return self._write_group is not None |
|
1183 |
||
1694.2.6
by Martin Pool
[merge] bzr.dev |
1184 |
def is_locked(self): |
1185 |
return self.control_files.is_locked() |
|
1186 |
||
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1187 |
def is_write_locked(self): |
1188 |
"""Return True if this object is write locked.""" |
|
1189 |
return self.is_locked() and self.control_files._lock_mode == 'w' |
|
1190 |
||
2018.5.75
by Andrew Bennetts
Add Repository.{dont_,}leave_lock_in_place. |
1191 |
def lock_write(self, token=None): |
1192 |
"""Lock this repository for writing. |
|
2617.6.8
by Robert Collins
Review feedback and documentation. |
1193 |
|
1194 |
This causes caching within the repository object to start accumulating
|
|
1195 |
data during reads, and allows a 'write_group' to be obtained. Write
|
|
1196 |
groups must be used for actual data insertion.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1197 |
|
2018.5.75
by Andrew Bennetts
Add Repository.{dont_,}leave_lock_in_place. |
1198 |
:param token: if this is already locked, then lock_write will fail
|
1199 |
unless the token matches the existing lock.
|
|
1200 |
:returns: a token if this instance supports tokens, otherwise None.
|
|
1201 |
:raises TokenLockingNotSupported: when a token is given but this
|
|
1202 |
instance doesn't support using token locks.
|
|
1203 |
:raises MismatchedToken: if the specified token doesn't match the token
|
|
1204 |
of the existing lock.
|
|
2617.6.8
by Robert Collins
Review feedback and documentation. |
1205 |
:seealso: start_write_group.
|
2018.5.75
by Andrew Bennetts
Add Repository.{dont_,}leave_lock_in_place. |
1206 |
|
2018.5.145
by Andrew Bennetts
Add a brief explanation of what tokens are used for to lock_write docstrings. |
1207 |
A token should be passed in if you know that you have locked the object
|
1208 |
some other way, and need to synchronise this object's state with that
|
|
1209 |
fact.
|
|
1210 |
||
2018.5.75
by Andrew Bennetts
Add Repository.{dont_,}leave_lock_in_place. |
1211 |
XXX: this docstring is duplicated in many places, e.g. lockable_files.py
|
1212 |
"""
|
|
4145.1.2
by Robert Collins
Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances. |
1213 |
locked = self.is_locked() |
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1214 |
result = self.control_files.lock_write(token=token) |
3221.12.13
by Robert Collins
Implement generic stacking rather than pack-internals based stacking. |
1215 |
for repo in self._fallback_repositories: |
1216 |
# Writes don't affect fallback repos
|
|
1217 |
repo.lock_read() |
|
4145.1.2
by Robert Collins
Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances. |
1218 |
if not locked: |
1219 |
self._refresh_data() |
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1220 |
return result |
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1221 |
|
1222 |
def lock_read(self): |
|
4145.1.2
by Robert Collins
Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances. |
1223 |
locked = self.is_locked() |
1553.5.55
by Martin Pool
[revert] broken changes |
1224 |
self.control_files.lock_read() |
3221.12.13
by Robert Collins
Implement generic stacking rather than pack-internals based stacking. |
1225 |
for repo in self._fallback_repositories: |
1226 |
repo.lock_read() |
|
4145.1.2
by Robert Collins
Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances. |
1227 |
if not locked: |
1228 |
self._refresh_data() |
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1229 |
|
1694.2.6
by Martin Pool
[merge] bzr.dev |
1230 |
def get_physical_lock_status(self): |
1231 |
return self.control_files.get_physical_lock_status() |
|
1624.3.36
by Olaf Conradi
Rename is_transport_locked() to get_physical_lock_status() as the |
1232 |
|
2018.5.75
by Andrew Bennetts
Add Repository.{dont_,}leave_lock_in_place. |
1233 |
def leave_lock_in_place(self): |
1234 |
"""Tell this repository not to release the physical lock when this |
|
1235 |
object is unlocked.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1236 |
|
2018.5.76
by Andrew Bennetts
Testing that repository.{dont_,}leave_lock_in_place raises NotImplementedError if lock_write returns None. |
1237 |
If lock_write doesn't return a token, then this method is not supported.
|
2018.5.75
by Andrew Bennetts
Add Repository.{dont_,}leave_lock_in_place. |
1238 |
"""
|
1239 |
self.control_files.leave_in_place() |
|
1240 |
||
1241 |
def dont_leave_lock_in_place(self): |
|
1242 |
"""Tell this repository to release the physical lock when this |
|
1243 |
object is unlocked, even if it didn't originally acquire it.
|
|
2018.5.76
by Andrew Bennetts
Testing that repository.{dont_,}leave_lock_in_place raises NotImplementedError if lock_write returns None. |
1244 |
|
1245 |
If lock_write doesn't return a token, then this method is not supported.
|
|
2018.5.75
by Andrew Bennetts
Add Repository.{dont_,}leave_lock_in_place. |
1246 |
"""
|
1247 |
self.control_files.dont_leave_in_place() |
|
1248 |
||
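leave_lock_in_place and dont_leave_lock_in_place only make sense together with the token returned by lock_write above. A sketch of handing a physical lock from one Repository object to another via that token; whether tokens are supported at all depends on the lock implementation, so this is illustrative only:

token = repo.lock_write()
if token is not None:
    # keep the physical lock on disk when this object unlocks
    repo.leave_lock_in_place()
repo.unlock()

# later, possibly from another Repository object for the same location:
repo.lock_write(token=token)
repo.dont_leave_lock_in_place()   # release the physical lock on the next unlock
repo.unlock()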
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1249 |
@needs_read_lock
|
2258.1.2
by Robert Collins
New version of gather_stats which gathers aggregate data too. |
1250 |
def gather_stats(self, revid=None, committers=None): |
2258.1.1
by Robert Collins
Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins). |
1251 |
"""Gather statistics from a revision id. |
1252 |
||
2258.1.2
by Robert Collins
New version of gather_stats which gathers aggregate data too. |
1253 |
:param revid: The revision id to gather statistics from, if None, then
|
1254 |
no revision specific statistics are gathered.
|
|
2258.1.1
by Robert Collins
Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins). |
1255 |
:param committers: Optional parameter controlling whether to grab
|
2258.1.2
by Robert Collins
New version of gather_stats which gathers aggregate data too. |
1256 |
a count of committers from the revision specific statistics.
|
2258.1.1
by Robert Collins
Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins). |
1257 |
:return: A dictionary of statistics. Currently this contains:
|
1258 |
committers: The number of committers if requested.
|
|
1259 |
firstrev: A tuple with timestamp, timezone for the penultimate left
|
|
1260 |
most ancestor of revid, if revid is not the NULL_REVISION.
|
|
1261 |
latestrev: A tuple with timestamp, timezone for revid, if revid is
|
|
1262 |
not the NULL_REVISION.
|
|
2258.1.2
by Robert Collins
New version of gather_stats which gathers aggregate data too. |
1263 |
revisions: The total revision count in the repository.
|
1264 |
size: An estimated disk size of the repository in bytes.
|
|
2258.1.1
by Robert Collins
Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins). |
1265 |
"""
|
1266 |
result = {} |
|
2258.1.2
by Robert Collins
New version of gather_stats which gathers aggregate data too. |
1267 |
if revid and committers: |
2258.1.1
by Robert Collins
Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins). |
1268 |
result['committers'] = 0 |
2258.1.2
by Robert Collins
New version of gather_stats which gathers aggregate data too. |
1269 |
if revid and revid != _mod_revision.NULL_REVISION: |
1270 |
if committers: |
|
1271 |
all_committers = set() |
|
1272 |
revisions = self.get_ancestry(revid) |
|
1273 |
# pop the leading None
|
|
1274 |
revisions.pop(0) |
|
1275 |
first_revision = None |
|
1276 |
if not committers: |
|
1277 |
# ignore the revisions in the middle - just grab first and last
|
|
1278 |
revisions = revisions[0], revisions[-1] |
|
1279 |
for revision in self.get_revisions(revisions): |
|
1280 |
if not first_revision: |
|
1281 |
first_revision = revision |
|
1282 |
if committers: |
|
1283 |
all_committers.add(revision.committer) |
|
1284 |
last_revision = revision |
|
1285 |
if committers: |
|
1286 |
result['committers'] = len(all_committers) |
|
1287 |
result['firstrev'] = (first_revision.timestamp, |
|
1288 |
first_revision.timezone) |
|
1289 |
result['latestrev'] = (last_revision.timestamp, |
|
1290 |
last_revision.timezone) |
|
1291 |
||
1292 |
# now gather global repository information
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1293 |
# XXX: This is available for many repos regardless of listability.
|
2258.1.2
by Robert Collins
New version of gather_stats which gathers aggregate data too. |
1294 |
if self.bzrdir.root_transport.listable(): |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1295 |
# XXX: do we want to define __len__() ?
|
3350.6.10
by Martin Pool
VersionedFiles review cleanups |
1296 |
# Maybe the versionedfiles object should provide a different
|
1297 |
# method to get the number of keys.
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1298 |
result['revisions'] = len(self.revisions.keys()) |
1299 |
# result['size'] = t
|
|
2258.1.1
by Robert Collins
Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins). |
1300 |
return result |
1301 |
||
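gather_stats returns a plain dict whose keys depend on the arguments and on whether the underlying transport is listable, as the docstring above describes. A sketch, with 'some-revision-id' standing in for a revision that exists in `repo`:

stats = repo.gather_stats(revid='some-revision-id', committers=True)

stats.get('committers')   # committer count for the ancestry, since requested
stats.get('firstrev')     # (timestamp, timezone) near the start of the ancestry
stats.get('latestrev')    # (timestamp, timezone) of 'some-revision-id'
stats.get('revisions')    # total revision count, if the transport was listable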
3140.1.2
by Aaron Bentley
Add ability to find branches inside repositories |
1302 |
def find_branches(self, using=False): |
1303 |
"""Find branches underneath this repository. |
|
1304 |
||
3140.1.7
by Aaron Bentley
Update docs |
1305 |
This will include branches inside other branches.
|
1306 |
||
3140.1.2
by Aaron Bentley
Add ability to find branches inside repositories |
1307 |
:param using: If True, list only branches using this repository.
|
1308 |
"""
|
|
3140.1.9
by Aaron Bentley
Optimize find_branches for standalone repositories |
1309 |
if using and not self.is_shared(): |
1310 |
try: |
|
1311 |
return [self.bzrdir.open_branch()] |
|
1312 |
except errors.NotBranchError: |
|
1313 |
return [] |
|
3140.1.2
by Aaron Bentley
Add ability to find branches inside repositories |
1314 |
class Evaluator(object): |
1315 |
||
1316 |
def __init__(self): |
|
1317 |
self.first_call = True |
|
1318 |
||
1319 |
def __call__(self, bzrdir): |
|
1320 |
# On the first call, the parameter is always the bzrdir
|
|
1321 |
# containing the current repo.
|
|
1322 |
if not self.first_call: |
|
1323 |
try: |
|
1324 |
repository = bzrdir.open_repository() |
|
1325 |
except errors.NoRepositoryPresent: |
|
1326 |
pass
|
|
1327 |
else: |
|
1328 |
return False, (None, repository) |
|
1329 |
self.first_call = False |
|
1330 |
try: |
|
1331 |
value = (bzrdir.open_branch(), None) |
|
1332 |
except errors.NotBranchError: |
|
1333 |
value = (None, None) |
|
1334 |
return True, value |
|
1335 |
||
1336 |
branches = [] |
|
1337 |
for branch, repository in bzrdir.BzrDir.find_bzrdirs( |
|
1338 |
self.bzrdir.root_transport, evaluate=Evaluator()): |
|
1339 |
if branch is not None: |
|
1340 |
branches.append(branch) |
|
1341 |
if not using and repository is not None: |
|
1342 |
branches.extend(repository.find_branches()) |
|
1343 |
return branches |
|
1344 |
||
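find_branches walks the bzrdirs under this repository's root transport; the `using` flag narrows the result to branches that actually store their data here. A short sketch:

# every branch found under the repository's root, including branches
# that carry their own repositories
all_branches = repo.find_branches()

# only branches whose revisions live in this repository
own_branches = repo.find_branches(using=True)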
2258.1.1
by Robert Collins
Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins). |
1345 |
@needs_read_lock
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
1346 |
def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True): |
1347 |
"""Return the revision ids that other has that this does not. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1348 |
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
1349 |
These are returned in topological order.
|
1350 |
||
1351 |
revision_id: only return revision ids included by revision_id.
|
|
1352 |
"""
|
|
1353 |
return InterRepository.get(other, self).search_missing_revision_ids( |
|
1354 |
revision_id, find_ghosts) |
|
1355 |
||
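search_missing_revision_ids is a thin convenience wrapper over InterRepository; a sketch comparing two already-open repositories (`repo` and `other` are assumptions):

# revision ids that 'other' has and 'repo' does not, limited to the
# ancestry of 'some-revision-id' when revision_id is supplied
missing = repo.search_missing_revision_ids(other,
                                           revision_id='some-revision-id',
                                           find_ghosts=True)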
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
1356 |
@staticmethod
|
1357 |
def open(base): |
|
1358 |
"""Open the repository rooted at base. |
|
1359 |
||
1360 |
For instance, if the repository is at URL/.bzr/repository,
|
|
1361 |
Repository.open(URL) -> a Repository instance.
|
|
1362 |
"""
|
|
1773.4.1
by Martin Pool
Add pyflakes makefile target; fix many warnings |
1363 |
control = bzrdir.BzrDir.open(base) |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
1364 |
return control.open_repository() |
1365 |
||
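Repository.open and has_same_location (defined earlier in this class) combine naturally when deciding whether two URLs name the same storage. A sketch with placeholder locations:

from bzrlib.repository import Repository

repo_a = Repository.open('some-url-or-path')
repo_b = Repository.open('another-url-or-path')
if repo_a.has_same_location(repo_b):
    # both objects point at the same physical repository; note this can
    # still be False for the same store reached via different URLs
    pass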
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
1366 |
def copy_content_into(self, destination, revision_id=None): |
1534.6.6
by Robert Collins
Move find_repository to bzrdir, its not quite ideal there but its simpler and until someone chooses to vary the search by branch type its completely sufficient. |
1367 |
"""Make a complete copy of the content in self into destination. |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1368 |
|
1369 |
This is a destructive operation! Do not use it on existing
|
|
1534.6.6
by Robert Collins
Move find_repository to bzrdir, its not quite ideal there but its simpler and until someone chooses to vary the search by branch type its completely sufficient. |
1370 |
repositories.
|
1371 |
"""
|
|
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
1372 |
return InterRepository.get(self, destination).copy_content(revision_id) |
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1373 |
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1374 |
def commit_write_group(self): |
1375 |
"""Commit the contents accrued within the current write group. |
|
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1376 |
|
1377 |
:seealso: start_write_group.
|
|
1378 |
"""
|
|
1379 |
if self._write_group is not self.get_transaction(): |
|
1380 |
# has an unlock or relock occurred?
|
|
2592.3.38
by Robert Collins
All experimental format tests passing again. |
1381 |
raise errors.BzrError('mismatched lock context %r and ' |
1382 |
'write group %r.' % |
|
1383 |
(self.get_transaction(), self._write_group)) |
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1384 |
self._commit_write_group() |
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1385 |
self._write_group = None |
1386 |
||
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1387 |
def _commit_write_group(self): |
1388 |
"""Template method for per-repository write group cleanup. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1389 |
|
1390 |
This is called before the write group is considered to be
|
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1391 |
finished and should ensure that all data handed to the repository
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1392 |
for writing during the write group is safely committed (to the
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1393 |
extent possible considering file system caching etc).
|
1394 |
"""
|
|
1395 |
||
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
1396 |
def suspend_write_group(self): |
1397 |
raise errors.UnsuspendableWriteGroup(self) |
|
1398 |
||
4145.1.2
by Robert Collins
Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances. |
1399 |
def refresh_data(self): |
1400 |
"""Re-read any data needed to to synchronise with disk. |
|
1401 |
||
1402 |
This method is intended to be called after another repository instance
|
|
1403 |
(such as one used by a smart server) has inserted data into the
|
|
1404 |
repository. It may not be called during a write group, but may be
|
|
1405 |
called at any other time.
|
|
1406 |
"""
|
|
1407 |
if self.is_in_write_group(): |
|
1408 |
raise errors.InternalBzrError( |
|
1409 |
"May not refresh_data while in a write group.") |
|
1410 |
self._refresh_data() |
|
1411 |
||
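refresh_data is the hook to call after some other Repository instance (for example one held by a smart server) has inserted data behind this object's back; it refuses to run inside a write group. A one-line sketch:

if not repo.is_in_write_group():
    repo.refresh_data()   # re-read caches so newly inserted revisions are visible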
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
1412 |
def resume_write_group(self, tokens): |
1413 |
if not self.is_write_locked(): |
|
1414 |
raise errors.NotWriteLocked(self) |
|
1415 |
if self._write_group: |
|
1416 |
raise errors.BzrError('already in a write group') |
|
1417 |
self._resume_write_group(tokens) |
|
1418 |
# so we can detect unlock/relock - the write group is now entered.
|
|
1419 |
self._write_group = self.get_transaction() |
|
4032.1.1
by John Arbash Meinel
Merge the removal of all trailing whitespace, and resolve conflicts. |
1420 |
|
4002.1.1
by Andrew Bennetts
Implement suspend_write_group/resume_write_group. |
1421 |
def _resume_write_group(self, tokens): |
1422 |
raise errors.UnsuspendableWriteGroup(self) |
|
1423 |
||
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
1424 |
def fetch(self, source, revision_id=None, pb=None, find_ghosts=False, |
1425 |
fetch_spec=None): |
|
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1426 |
"""Fetch the content required to construct revision_id from source. |
1427 |
||
4070.9.14
by Andrew Bennetts
Tweaks requested by Robert's review. |
1428 |
If revision_id is None and fetch_spec is None, then all content is
|
1429 |
copied.
|
|
1430 |
||
4145.1.1
by Robert Collins
Explicitly prevent fetching while the target repository is in a write group. |
1431 |
fetch() may not be used when the repository is in a write group -
|
1432 |
either finish the current write group before using fetch, or use
|
|
1433 |
fetch before starting the write group.
|
|
1434 |
||
2949.1.1
by Robert Collins
Change Repository.fetch to provide a find_ghosts parameter which triggers ghost filling. |
1435 |
:param find_ghosts: Find and copy revisions in the source that are
|
1436 |
ghosts in the target (and not reachable directly by walking out to
|
|
1437 |
the first-present revision in target from revision_id).
|
|
4070.9.14
by Andrew Bennetts
Tweaks requested by Robert's review. |
1438 |
:param revision_id: If specified, all the content needed for this
|
1439 |
revision ID will be copied to the target. Fetch will determine for
|
|
1440 |
itself which content needs to be copied.
|
|
1441 |
:param fetch_spec: If specified, a SearchResult or
|
|
1442 |
PendingAncestryResult that describes which revisions to copy. This
|
|
1443 |
allows copying multiple heads at once. Mutually exclusive with
|
|
1444 |
revision_id.
|
|
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1445 |
"""
|
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
1446 |
if fetch_spec is not None and revision_id is not None: |
1447 |
raise AssertionError( |
|
1448 |
"fetch_spec and revision_id are mutually exclusive.") |
|
4145.1.1
by Robert Collins
Explicitly prevent fetching while the target repository is in a write group. |
1449 |
if self.is_in_write_group(): |
4145.1.3
by Robert Collins
NEWS conflicts. |
1450 |
raise errors.InternalBzrError( |
1451 |
"May not fetch while in a write group.") |
|
2592.3.115
by Robert Collins
Move same repository check up to Repository.fetch to allow all fetch implementations to benefit. |
1452 |
# fast path same-url fetch operations
|
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
1453 |
if self.has_same_location(source) and fetch_spec is None: |
2592.3.115
by Robert Collins
Move same repository check up to Repository.fetch to allow all fetch implementations to benefit. |
1454 |
# check that last_revision is in 'from' and then return a
|
1455 |
# no-operation.
|
|
1456 |
if (revision_id is not None and |
|
1457 |
not _mod_revision.is_null(revision_id)): |
|
1458 |
self.get_revision(revision_id) |
|
1459 |
return 0, [] |
|
3582.1.3
by Martin Pool
Repository.fetch no longer needs to translate NotImplementedErro to IncompatibleRepositories |
1460 |
# if there is no specific appropriate InterRepository, this will get
|
1461 |
# the InterRepository base class, which raises an
|
|
1462 |
# IncompatibleRepositories when asked to fetch.
|
|
2323.8.3
by Aaron Bentley
Reduce scope of try/except, update NEWS |
1463 |
inter = InterRepository.get(source, self) |
3582.1.3
by Martin Pool
Repository.fetch no longer needs to translate NotImplementedErro to IncompatibleRepositories |
1464 |
return inter.fetch(revision_id=revision_id, pb=pb, |
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
1465 |
find_ghosts=find_ghosts, fetch_spec=fetch_spec) |
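A usage sketch, assuming `source_repo` and `target_repo` are open bzrlib Repository objects (e.g. via bzrlib.repository.Repository.open) and `rev_id` is present in the source; the write-group restriction checked above applies to the target:

target_repo.lock_write()
try:
    # Copies rev_id plus everything required to reconstruct it; a no-op
    # when both repositories have the same location.
    target_repo.fetch(source_repo, revision_id=rev_id)
finally:
    target_repo.unlock()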
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
1466 |
|
2520.4.54
by Aaron Bentley
Hang a create_bundle method off repository |
1467 |
def create_bundle(self, target, base, fileobj, format=None): |
1468 |
return serializer.write_bundle(self, target, base, fileobj, format) |
|
1469 |
||
2803.2.1
by Robert Collins
* CommitBuilder now advertises itself as requiring the root entry to be |
1470 |
def get_commit_builder(self, branch, parents, config, timestamp=None, |
1471 |
timezone=None, committer=None, revprops=None, |
|
1740.3.7
by Jelmer Vernooij
Move committer, log, revprops, timestamp and timezone to CommitBuilder. |
1472 |
revision_id=None): |
1473 |
"""Obtain a CommitBuilder for this repository. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1474 |
|
1740.3.7
by Jelmer Vernooij
Move committer, log, revprops, timestamp and timezone to CommitBuilder. |
1475 |
:param branch: Branch to commit to.
|
1476 |
:param parents: Revision ids of the parents of the new revision.
|
|
1477 |
:param config: Configuration to use.
|
|
1478 |
:param timestamp: Optional timestamp recorded for commit.
|
|
1479 |
:param timezone: Optional timezone for timestamp.
|
|
1480 |
:param committer: Optional committer to set for commit.
|
|
1481 |
:param revprops: Optional dictionary of revision properties.
|
|
1482 |
:param revision_id: Optional revision id.
|
|
1483 |
"""
|
|
2818.3.2
by Robert Collins
Review feedback. |
1484 |
result = self._commit_builder_class(self, parents, config, |
2592.3.135
by Robert Collins
Do not create many transient knit objects, saving 4% on commit. |
1485 |
timestamp, timezone, committer, revprops, revision_id) |
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1486 |
self.start_write_group() |
1487 |
return result |
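A hedged sketch of obtaining a builder directly; real commits normally go through the WorkingTree/Branch commit machinery. `repo` is assumed write-locked and `branch` a bzrlib Branch stored in it:

parents = [branch.last_revision()]    # assumption: branch already has history
builder = repo.get_commit_builder(branch, parents, branch.get_config(),
                                  committer='Jane Doe <jane@example.com>')
try:
    # ... record inventory entries, then builder.commit(message) followed
    # by repo.commit_write_group() ...
    pass
except:
    # get_commit_builder() started a write group; it must be completed or
    # aborted before the write lock is released (see unlock() below).
    repo.abort_write_group()
    raise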
|
1740.3.1
by Jelmer Vernooij
Introduce and use CommitBuilder objects. |
1488 |
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1489 |
def unlock(self): |
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1490 |
if (self.control_files._lock_count == 1 and |
1491 |
self.control_files._lock_mode == 'w'): |
|
1492 |
if self._write_group is not None: |
|
2592.3.244
by Martin Pool
unlock while in a write group now aborts the write group, unlocks, and errors. |
1493 |
self.abort_write_group() |
1494 |
self.control_files.unlock() |
|
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1495 |
raise errors.BzrError( |
1496 |
'Must end write groups before releasing write locks.') |
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1497 |
self.control_files.unlock() |
3882.6.23
by John Arbash Meinel
Change the XMLSerializer.read_inventory_from_string api. |
1498 |
if self.control_files._lock_count == 0: |
1499 |
self._inventory_entry_cache.clear() |
|
3221.12.13
by Robert Collins
Implement generic stacking rather than pack-internals based stacking. |
1500 |
for repo in self._fallback_repositories: |
1501 |
repo.unlock() |
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1502 |
|
1185.65.27
by Robert Collins
Tweak storage towards mergability. |
1503 |
@needs_read_lock
|
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
1504 |
def clone(self, a_bzrdir, revision_id=None): |
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
1505 |
"""Clone this repository into a_bzrdir using the current format. |
1506 |
||
1507 |
Currently no check is made that the format of this repository and
|
|
1508 |
the bzrdir format are compatible. FIXME RBC 20060201.
|
|
2241.1.4
by Martin Pool
Moved old weave-based repository formats into bzrlib.repofmt.weaverepo. |
1509 |
|
1510 |
:return: The newly created destination repository.
|
|
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
1511 |
"""
|
2440.1.1
by Martin Pool
Add new Repository.sprout, |
1512 |
# TODO: deprecate after 0.16; cloning this with all its settings is
|
1513 |
# probably not very useful -- mbp 20070423
|
|
1514 |
dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared()) |
|
1515 |
self.copy_content_into(dest_repo, revision_id) |
|
1516 |
return dest_repo |
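A sketch, assuming `repo` is an open Repository and the target path does not yet exist; BzrDir.create and the path are illustrative only:

from bzrlib import bzrdir
target_dir = bzrdir.BzrDir.create('/path/to/mirror')
# Copies both content and settings (such as the shared flag).
mirror_repo = repo.clone(target_dir)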
|
1517 |
||
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1518 |
def start_write_group(self): |
1519 |
"""Start a write group in the repository. |
|
1520 |
||
1521 |
Write groups are used by repositories which do not have a 1:1 mapping
|
|
1522 |
between file ids and backend store, to manage the insertion of data from
|
|
1523 |
both fetch and commit operations.
|
|
1524 |
||
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1525 |
A write lock is required around the start_write_group/commit_write_group
|
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1526 |
for the support of lock-requiring repository formats.
|
2617.6.8
by Robert Collins
Review feedback and documentation. |
1527 |
|
1528 |
One can only insert data into a repository inside a write group.
|
|
1529 |
||
2617.6.6
by Robert Collins
Some review feedback. |
1530 |
:return: None.
|
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1531 |
"""
|
2592.3.188
by Robert Collins
Allow pack repositories to have multiple writers active at one time, for greater concurrency. |
1532 |
if not self.is_write_locked(): |
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1533 |
raise errors.NotWriteLocked(self) |
1534 |
if self._write_group: |
|
1535 |
raise errors.BzrError('already in a write group') |
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1536 |
self._start_write_group() |
1537 |
# so we can detect unlock/relock - the write group is now entered.
|
|
2617.6.1
by Robert Collins
* New method on Repository - ``start_write_group``, ``end_write_group`` |
1538 |
self._write_group = self.get_transaction() |
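The usual lifecycle around direct data insertion, sketched with a placeholder insertion step; commit_write_group() and abort_write_group() are the counterparts named in the docstring and in unlock() above:

repo.lock_write()
try:
    repo.start_write_group()
    try:
        # ... add texts / inventories / revisions here ...
        repo.commit_write_group()
    except:
        repo.abort_write_group()
        raise
finally:
    repo.unlock()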
1539 |
||
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1540 |
def _start_write_group(self): |
1541 |
"""Template method for per-repository write group startup. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1542 |
|
1543 |
This is called before the write group is considered to be
|
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
1544 |
entered.
|
1545 |
"""
|
|
1546 |
||
2440.1.1
by Martin Pool
Add new Repository.sprout, |
1547 |
@needs_read_lock
|
1548 |
def sprout(self, to_bzrdir, revision_id=None): |
|
1549 |
"""Create a descendent repository for new development. |
|
1550 |
||
1551 |
Unlike clone, this does not copy the settings of the repository.
|
|
1552 |
"""
|
|
1553 |
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False) |
|
1554 |
dest_repo.fetch(self, revision_id=revision_id) |
|
1555 |
return dest_repo |
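A sketch mirroring the clone() example above, but producing a settings-free copy limited to one head; the path and revision id are placeholders:

from bzrlib import bzrdir
dev_dir = bzrdir.BzrDir.create('/path/to/dev-copy')
dev_repo = repo.sprout(dev_dir, revision_id='some-revision-id')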
|
1556 |
||
1557 |
def _create_sprouting_repo(self, a_bzrdir, shared): |
|
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1558 |
if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__): |
1559 |
# use target default format.
|
|
2241.1.4
by Martin Pool
Moved old weave-based repository formats into bzrlib.repofmt.weaverepo. |
1560 |
dest_repo = a_bzrdir.create_repository() |
1534.4.50
by Robert Collins
Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running. |
1561 |
else: |
2241.1.4
by Martin Pool
Moved old weave-based repository formats into bzrlib.repofmt.weaverepo. |
1562 |
# Most control formats need the repository to be specifically
|
1563 |
# created, but on some old all-in-one formats it's not needed
|
|
1564 |
try: |
|
2440.1.1
by Martin Pool
Add new Repository.sprout, |
1565 |
dest_repo = self._format.initialize(a_bzrdir, shared=shared) |
2241.1.4
by Martin Pool
Moved old weave-based repository formats into bzrlib.repofmt.weaverepo. |
1566 |
except errors.UninitializableFormat: |
1567 |
dest_repo = a_bzrdir.open_repository() |
|
1568 |
return dest_repo |
|
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
1569 |
|
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
1570 |
def _get_sink(self): |
1571 |
"""Return a sink for streaming into this repository.""" |
|
1572 |
return StreamSink(self) |
|
1573 |
||
4060.1.3
by Robert Collins
Implement the separate source component for fetch - repository.StreamSource. |
1574 |
def _get_source(self, to_format): |
1575 |
"""Return a source for streaming from this repository.""" |
|
1576 |
return StreamSource(self, to_format) |
|
1577 |
||
1563.2.22
by Robert Collins
Move responsibility for repository.has_revision into RevisionStore |
1578 |
@needs_read_lock
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1579 |
def has_revision(self, revision_id): |
1563.2.22
by Robert Collins
Move responsibility for repository.has_revision into RevisionStore |
1580 |
"""True if this repository has a copy of the revision.""" |
3172.3.1
by Robert Collins
Repository has a new method ``has_revisions`` which signals the presence |
1581 |
return revision_id in self.has_revisions((revision_id,)) |
1582 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1583 |
@needs_read_lock
|
3172.3.1
by Robert Collins
Repository has a new method ``has_revisions`` which signals the presence |
1584 |
def has_revisions(self, revision_ids): |
1585 |
"""Probe to find out the presence of multiple revisions. |
|
1586 |
||
1587 |
:param revision_ids: An iterable of revision_ids.
|
|
1588 |
:return: A set of the revision_ids that were present.
|
|
1589 |
"""
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1590 |
parent_map = self.revisions.get_parent_map( |
1591 |
[(rev_id,) for rev_id in revision_ids]) |
|
1592 |
result = set() |
|
1593 |
if _mod_revision.NULL_REVISION in revision_ids: |
|
1594 |
result.add(_mod_revision.NULL_REVISION) |
|
1595 |
result.update([key[0] for key in parent_map]) |
|
1596 |
return result |
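A sketch of probing several candidate ids at once; the ids are placeholders:

candidates = ['rev-a', 'rev-b', 'rev-c']
present = repo.has_revisions(candidates)
missing = [r for r in candidates if r not in present]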
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1597 |
|
1185.65.27
by Robert Collins
Tweak storage towards mergability. |
1598 |
@needs_read_lock
|
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
1599 |
def get_revision(self, revision_id): |
1600 |
"""Return the Revision object for a named revision.""" |
|
1601 |
return self.get_revisions([revision_id])[0] |
|
1602 |
||
1603 |
@needs_read_lock
|
|
1570.1.13
by Robert Collins
Check for incorrect revision parentage in the weave during revision access. |
1604 |
def get_revision_reconcile(self, revision_id): |
1605 |
"""'reconcile' helper routine that allows access to a revision always. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1606 |
|
1570.1.13
by Robert Collins
Check for incorrect revision parentage in the weave during revision access. |
1607 |
This variant of get_revision does not cross check the weave graph
|
1608 |
against the revision graph, as get_revision does; it should only
|
|
1609 |
be used by reconcile, or reconcile-alike commands that are correcting
|
|
1610 |
or testing the revision graph.
|
|
1611 |
"""
|
|
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
1612 |
return self._get_revisions([revision_id])[0] |
2249.5.13
by John Arbash Meinel
Finish auditing Repository, and fix generate_ids to always generate utf8 ids. |
1613 |
|
1756.1.2
by Aaron Bentley
Show logs using get_revisions |
1614 |
@needs_read_lock
|
1615 |
def get_revisions(self, revision_ids): |
|
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
1616 |
"""Get many revisions at once.""" |
1617 |
return self._get_revisions(revision_ids) |
|
1618 |
||
1619 |
@needs_read_lock
|
|
1620 |
def _get_revisions(self, revision_ids): |
|
1621 |
"""Core work logic to get many revisions without sanity checks.""" |
|
1622 |
for rev_id in revision_ids: |
|
1623 |
if not rev_id or not isinstance(rev_id, basestring): |
|
1624 |
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self) |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1625 |
keys = [(key,) for key in revision_ids] |
1626 |
stream = self.revisions.get_record_stream(keys, 'unordered', True) |
|
1627 |
revs = {} |
|
1628 |
for record in stream: |
|
1629 |
if record.storage_kind == 'absent': |
|
1630 |
raise errors.NoSuchRevision(self, record.key[0]) |
|
1631 |
text = record.get_bytes_as('fulltext') |
|
1632 |
rev = self._serializer.read_revision_from_string(text) |
|
1633 |
revs[record.key[0]] = rev |
|
1634 |
return [revs[revid] for revid in revision_ids] |
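A sketch of the public get_revisions() call, reading standard Revision attributes; the ids are placeholders:

repo.lock_read()
try:
    for rev in repo.get_revisions(['rev-a', 'rev-b']):
        print rev.revision_id, rev.committer, rev.timestamp
        print rev.message
finally:
    repo.unlock()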
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1635 |
|
1185.65.27
by Robert Collins
Tweak storage towards mergability. |
1636 |
@needs_read_lock
|
1563.2.29
by Robert Collins
Remove all but fetch references to repository.revision_store. |
1637 |
def get_revision_xml(self, revision_id): |
4202.3.1
by Andrew Bennetts
Don't use get_revision_xml when writing a bundle, instead get all the revisions together. |
1638 |
"""Return the XML representation of a revision. |
1639 |
||
1640 |
:param revision_id: Revision for which to return the XML.
|
|
1641 |
:return: XML string
|
|
1642 |
"""
|
|
1643 |
return self._serializer.write_revision_to_string( |
|
1644 |
self.get_revision(revision_id)) |
|
1563.2.29
by Robert Collins
Remove all but fetch references to repository.revision_store. |
1645 |
|
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
1646 |
def get_deltas_for_revisions(self, revisions, specific_fileids=None): |
1756.3.19
by Aaron Bentley
Documentation and cleanups |
1647 |
"""Produce a generator of revision deltas. |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1648 |
|
1756.3.19
by Aaron Bentley
Documentation and cleanups |
1649 |
Note that the input is a sequence of REVISIONS, not revision_ids.
|
1650 |
Trees will be held in memory until the generator exits.
|
|
1651 |
Each delta is relative to the revision's lefthand predecessor.
|
|
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
1652 |
|
1653 |
:param specific_fileids: if not None, the result is filtered
|
|
1654 |
so that only those file-ids, their parents and their
|
|
1655 |
children are included.
|
|
1756.3.19
by Aaron Bentley
Documentation and cleanups |
1656 |
"""
|
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
1657 |
# Get the revision-ids of interest
|
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
1658 |
required_trees = set() |
1659 |
for revision in revisions: |
|
1660 |
required_trees.add(revision.revision_id) |
|
1661 |
required_trees.update(revision.parent_ids[:1]) |
|
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
1662 |
|
1663 |
# Get the matching filtered trees. Note that it's more
|
|
1664 |
# efficient to pass filtered trees to changes_from() rather
|
|
1665 |
# than doing the filtering afterwards. changes_from() could
|
|
1666 |
# arguably do the filtering itself but it's path-based, not
|
|
1667 |
# file-id based, so filtering before or afterwards is
|
|
1668 |
# currently easier.
|
|
1669 |
if specific_fileids is None: |
|
1670 |
trees = dict((t.get_revision_id(), t) for |
|
1671 |
t in self.revision_trees(required_trees)) |
|
1672 |
else: |
|
1673 |
trees = dict((t.get_revision_id(), t) for |
|
1674 |
t in self._filtered_revision_trees(required_trees, |
|
1675 |
specific_fileids)) |
|
1676 |
||
1677 |
# Calculate the deltas
|
|
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
1678 |
for revision in revisions: |
1679 |
if not revision.parent_ids: |
|
3668.5.1
by Jelmer Vernooij
Use NULL_REVISION rather than None for Repository.revision_tree(). |
1680 |
old_tree = self.revision_tree(_mod_revision.NULL_REVISION) |
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
1681 |
else: |
1682 |
old_tree = trees[revision.parent_ids[0]] |
|
1852.10.3
by Robert Collins
Remove all uses of compare_trees and replace with Tree.changes_from throughout bzrlib. |
1683 |
yield trees[revision.revision_id].changes_from(old_tree) |
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
1684 |
|
1756.3.19
by Aaron Bentley
Documentation and cleanups |
1685 |
@needs_read_lock
|
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
1686 |
def get_revision_delta(self, revision_id, specific_fileids=None): |
1744.2.2
by Johan Rydberg
Add get_revision_delta to Repository; and make Branch.get_revision_delta use it. |
1687 |
"""Return the delta for one revision. |
1688 |
||
1689 |
The delta is relative to the left-hand predecessor of the
|
|
1690 |
revision.
|
|
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
1691 |
|
1692 |
:param specific_fileids: if not None, the result is filtered
|
|
1693 |
so that only those file-ids, their parents and their
|
|
1694 |
children are included.
|
|
1744.2.2
by Johan Rydberg
Add get_revision_delta to Repository; and make Branch.get_revision_delta use it. |
1695 |
"""
|
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
1696 |
r = self.get_revision(revision_id) |
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
1697 |
return list(self.get_deltas_for_revisions([r], |
1698 |
specific_fileids=specific_fileids))[0] |
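A sketch of reading one revision's delta; the (path, file_id, kind) layout of the added/removed lists is assumed from bzrlib.delta.TreeDelta:

delta = repo.get_revision_delta('rev-a')
for path, file_id, kind in delta.added:
    print 'added  ', path
for path, file_id, kind in delta.removed:
    print 'removed', path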
|
1744.2.2
by Johan Rydberg
Add get_revision_delta to Repository; and make Branch.get_revision_delta use it. |
1699 |
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1700 |
@needs_write_lock
|
1701 |
def store_revision_signature(self, gpg_strategy, plaintext, revision_id): |
|
1563.2.29
by Robert Collins
Remove all but fetch references to repository.revision_store. |
1702 |
signature = gpg_strategy.sign(plaintext) |
2996.2.4
by Aaron Bentley
Rename function to add_signature_text |
1703 |
self.add_signature_text(revision_id, signature) |
2996.2.3
by Aaron Bentley
Add tests for install_revisions and add_signature |
1704 |
|
1705 |
@needs_write_lock
|
|
2996.2.4
by Aaron Bentley
Rename function to add_signature_text |
1706 |
def add_signature_text(self, revision_id, signature): |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1707 |
self.signatures.add_lines((revision_id,), (), |
1708 |
osutils.split_lines(signature)) |
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
1709 |
|
2988.1.2
by Robert Collins
New Repository API find_text_key_references for use by reconcile and check. |
1710 |
def find_text_key_references(self): |
1711 |
"""Find the text key references within the repository. |
|
1712 |
||
1713 |
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
|
|
1714 |
to whether they were referred to by the inventory of the
|
|
1715 |
revision_id that they contain. The inventory texts from all present
|
|
1716 |
revision ids are assessed to generate this report.
|
|
1717 |
"""
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1718 |
revision_keys = self.revisions.keys() |
1719 |
w = self.inventories |
|
2988.1.2
by Robert Collins
New Repository API find_text_key_references for use by reconcile and check. |
1720 |
pb = ui.ui_factory.nested_progress_bar() |
1721 |
try: |
|
1722 |
return self._find_text_key_references_from_xml_inventory_lines( |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1723 |
w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb)) |
2988.1.2
by Robert Collins
New Repository API find_text_key_references for use by reconcile and check. |
1724 |
finally: |
1725 |
pb.finished() |
|
1726 |
||
2988.1.1
by Robert Collins
Refactor fetch's xml inventory parsing into a core routine that extracts the data and a separate one that filters for fetch. |
1727 |
def _find_text_key_references_from_xml_inventory_lines(self, |
1728 |
line_iterator): |
|
1729 |
"""Core routine for extracting references to texts from inventories. |
|
2592.3.110
by Robert Collins
Filter out texts and signatures not referenced by the revisions being copied during pack to pack fetching. |
1730 |
|
1731 |
This performs the translation of xml lines to revision ids.
|
|
1732 |
||
2975.3.1
by Robert Collins
Change (without backwards compatibility) the |
1733 |
:param line_iterator: An iterator of (line, origin_version_id) pairs.
|
2988.1.1
by Robert Collins
Refactor fetch's xml inventory parsing into a core routine that extracts the data and a separate one that filters for fetch. |
1734 |
:return: A dictionary mapping text keys ((fileid, revision_id) tuples)
|
1735 |
to whether they were referred to by the inventory of the
|
|
1736 |
revision_id that they contain. Note that if that revision_id was
|
|
1737 |
not part of the line_iterator's output then False will be given -
|
|
1738 |
even though it may actually refer to that key.
|
|
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
1739 |
"""
|
2988.2.2
by Robert Collins
Review feedback. |
1740 |
if not self._serializer.support_altered_by_hack: |
1741 |
raise AssertionError( |
|
1742 |
"_find_text_key_references_from_xml_inventory_lines only "
|
|
1743 |
"supported for branches which store inventory as unnested xml"
|
|
1744 |
", not on %r" % self) |
|
1694.2.6
by Martin Pool
[merge] bzr.dev |
1745 |
result = {} |
1563.2.35
by Robert Collins
cleanup deprecation warnings and finish conversion so the inventory is knit based too. |
1746 |
|
1694.2.6
by Martin Pool
[merge] bzr.dev |
1747 |
# this code needs to read every new line in every inventory for the
|
1748 |
# inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1749 |
# not present in one of those inventories is unnecessary but not
|
1594.2.6
by Robert Collins
Introduce a api specifically for looking at lines in some versions of the inventory, for fileid_involved. |
1750 |
# harmful because we are filtering by the revision id marker in the
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
1751 |
# inventory lines: we only select file ids altered in one of those
|
1759.2.2
by Jelmer Vernooij
Revert some of my spelling fixes and fix some typos after review by Aaron. |
1752 |
# revisions. We don't need to see all lines in the inventory because
|
1594.2.6
by Robert Collins
Introduce a api specifically for looking at lines in some versions of the inventory, for fileid_involved. |
1753 |
# only those added in an inventory in rev X can contain a revision=X
|
1754 |
# line.
|
|
2163.2.3
by John Arbash Meinel
Change to local variables to save another 300ms |
1755 |
unescape_revid_cache = {} |
1756 |
unescape_fileid_cache = {} |
|
1757 |
||
2163.2.5
by John Arbash Meinel
Inline the cache lookup, and explain why |
1758 |
# jam 20061218 In a big fetch, this handles hundreds of thousands
|
1759 |
# of lines, so it has had a lot of inlining and optimizing done.
|
|
1760 |
# Sorry that it is a little bit messy.
|
|
2163.2.3
by John Arbash Meinel
Change to local variables to save another 300ms |
1761 |
# Move several functions to be local variables, since this is a long
|
1762 |
# running loop.
|
|
1763 |
search = self._file_ids_altered_regex.search |
|
2163.2.5
by John Arbash Meinel
Inline the cache lookup, and explain why |
1764 |
unescape = _unescape_xml |
2163.2.3
by John Arbash Meinel
Change to local variables to save another 300ms |
1765 |
setdefault = result.setdefault |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1766 |
for line, line_key in line_iterator: |
2592.3.110
by Robert Collins
Filter out texts and signatures not referenced by the revisions being copied during pack to pack fetching. |
1767 |
match = search(line) |
1768 |
if match is None: |
|
1769 |
continue
|
|
1770 |
# One call to match.group() returning multiple items is quite a
|
|
1771 |
# bit faster than 2 calls to match.group() each returning 1
|
|
1772 |
file_id, revision_id = match.group('file_id', 'revision_id') |
|
1773 |
||
1774 |
# Inlining the cache lookups helps a lot when you make 170,000
|
|
1775 |
# lines and 350k ids, versus 8.4 unique ids.
|
|
1776 |
# Using a cache helps in 2 ways:
|
|
1777 |
# 1) Avoids unnecessary decoding calls
|
|
1778 |
# 2) Re-uses cached strings, which helps in future set and
|
|
1779 |
# equality checks.
|
|
1780 |
# (2) is enough that removing encoding entirely along with
|
|
1781 |
# the cache (so we are using plain strings) results in no
|
|
1782 |
# performance improvement.
|
|
1783 |
try: |
|
1784 |
revision_id = unescape_revid_cache[revision_id] |
|
1785 |
except KeyError: |
|
1786 |
unescaped = unescape(revision_id) |
|
1787 |
unescape_revid_cache[revision_id] = unescaped |
|
1788 |
revision_id = unescaped |
|
1789 |
||
2988.2.2
by Robert Collins
Review feedback. |
1790 |
# Note that unconditionally unescaping means that we deserialise
|
1791 |
# every fileid, which for general 'pull' is not great, but we don't
|
|
1792 |
# really want to have so many fulltexts that this matters anyway.
|
|
1793 |
# RBC 20071114.
|
|
2988.1.1
by Robert Collins
Refactor fetch's xml inventory parsing into a core routine that extracts the data and a separate one that filters for fetch. |
1794 |
try: |
1795 |
file_id = unescape_fileid_cache[file_id] |
|
1796 |
except KeyError: |
|
1797 |
unescaped = unescape(file_id) |
|
1798 |
unescape_fileid_cache[file_id] = unescaped |
|
1799 |
file_id = unescaped |
|
1800 |
||
1801 |
key = (file_id, revision_id) |
|
1802 |
setdefault(key, False) |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1803 |
if revision_id == line_key[-1]: |
2988.1.1
by Robert Collins
Refactor fetch's xml inventory parsing into a core routine that extracts the data and a separate one that filters for fetch. |
1804 |
result[key] = True |
1805 |
return result |
|
1806 |
||
4098.4.1
by Robert Collins
Handle inconsistent inventory data more gracefully at a small performance cost during fetch. |
1807 |
def _inventory_xml_lines_for_keys(self, keys): |
1808 |
"""Get a line iterator of the sort needed for findind references. |
|
1809 |
||
1810 |
Not relevant for non-xml inventory repositories.
|
|
1811 |
||
1812 |
Ghosts in revision_keys are ignored.
|
|
1813 |
||
1814 |
:param keys: The revision keys for the inventories to inspect.
|
|
1815 |
:return: An iterator over (inventory line, revid) for the fulltexts of
|
|
1816 |
all of the xml inventories specified by revision_keys.
|
|
1817 |
"""
|
|
1818 |
stream = self.inventories.get_record_stream(keys, 'unordered', True) |
|
1819 |
for record in stream: |
|
1820 |
if record.storage_kind != 'absent': |
|
1821 |
chunks = record.get_bytes_as('chunked') |
|
1822 |
revid = record.key[-1] |
|
1823 |
lines = osutils.chunks_to_lines(chunks) |
|
1824 |
for line in lines: |
|
1825 |
yield line, revid |
|
1826 |
||
2988.1.1
by Robert Collins
Refactor fetch's xml inventory parsing into a core routine that extracts the data and a separate one that filters for fetch. |
1827 |
def _find_file_ids_from_xml_inventory_lines(self, line_iterator, |
1828 |
revision_ids): |
|
1829 |
"""Helper routine for fileids_altered_by_revision_ids. |
|
1830 |
||
1831 |
This performs the translation of xml lines to revision ids.
|
|
1832 |
||
1833 |
:param line_iterator: An iterator of (line, origin_version_id) pairs.
|
|
1834 |
:param revision_ids: The revision ids to filter for. This should be a
|
|
1835 |
set or other type which supports efficient __contains__ lookups, as
|
|
1836 |
the revision id from each parsed line will be looked up in the
|
|
1837 |
revision_ids filter.
|
|
1838 |
:return: a dictionary mapping altered file-ids to an iterable of
|
|
1839 |
revision_ids. Each altered file-id has the exact revision_ids that
|
|
1840 |
altered it listed explicitly.
|
|
1841 |
"""
|
|
4098.4.1
by Robert Collins
Handle inconsistent inventory data more gracefully at a small performance cost during fetch. |
1842 |
seen = set(self._find_text_key_references_from_xml_inventory_lines( |
1843 |
line_iterator).iterkeys()) |
|
1844 |
# Note that revision_ids are revision keys.
|
|
1845 |
parent_maps = self.revisions.get_parent_map(revision_ids) |
|
1846 |
parents = set() |
|
1847 |
map(parents.update, parent_maps.itervalues()) |
|
1848 |
parents.difference_update(revision_ids) |
|
1849 |
parent_seen = set(self._find_text_key_references_from_xml_inventory_lines( |
|
1850 |
self._inventory_xml_lines_for_keys(parents))) |
|
1851 |
new_keys = seen - parent_seen |
|
2988.1.1
by Robert Collins
Refactor fetch's xml inventory parsing into a core routine that extracts the data and a separate one that filters for fetch. |
1852 |
result = {} |
1853 |
setdefault = result.setdefault |
|
4098.4.1
by Robert Collins
Handle inconsistent inventory data more gracefully at a small performance cost during fetch. |
1854 |
for key in new_keys: |
1855 |
setdefault(key[0], set()).add(key[-1]) |
|
2592.3.110
by Robert Collins
Filter out texts and signatures not referenced by the revisions being copied during pack to pack fetching. |
1856 |
return result |
1857 |
||
3422.1.1
by John Arbash Meinel
merge in bzr-1.5rc1, revert the transaction cache change |
1858 |
def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None): |
2592.3.110
by Robert Collins
Filter out texts and signatures not referenced by the revisions being copied during pack to pack fetching. |
1859 |
"""Find the file ids and versions affected by revisions. |
1860 |
||
1861 |
:param revision_ids: an iterable containing revision ids.
|
|
3422.1.1
by John Arbash Meinel
merge in bzr-1.5rc1, revert the transaction cache change |
1862 |
:param _inv_weave: The inventory weave from this repository or None.
|
1863 |
If None, the inventory weave will be opened automatically.
|
|
2592.3.110
by Robert Collins
Filter out texts and signatures not referenced by the revisions being copied during pack to pack fetching. |
1864 |
:return: a dictionary mapping altered file-ids to an iterable of
|
1865 |
revision_ids. Each altered file-id has the exact revision_ids that
|
|
1866 |
altered it listed explicitly.
|
|
1867 |
"""
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1868 |
selected_keys = set((revid,) for revid in revision_ids) |
1869 |
w = _inv_weave or self.inventories |
|
2039.1.1
by Aaron Bentley
Clean up progress properly when interrupted during fetch (#54000) |
1870 |
pb = ui.ui_factory.nested_progress_bar() |
1871 |
try: |
|
2592.3.110
by Robert Collins
Filter out texts and signatures not referenced by the revisions being copied during pack to pack fetching. |
1872 |
return self._find_file_ids_from_xml_inventory_lines( |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1873 |
w.iter_lines_added_or_present_in_keys( |
1874 |
selected_keys, pb=pb), |
|
1875 |
selected_keys) |
|
2039.1.1
by Aaron Bentley
Clean up progress properly when interrupted during fetch (#54000) |
1876 |
finally: |
1877 |
pb.finished() |
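A sketch of the public entry point, with placeholder revision ids:

repo.lock_read()
try:
    altered = repo.fileids_altered_by_revision_ids(['rev-a', 'rev-b'])
    for file_id, rev_ids in sorted(altered.iteritems()):
        print file_id, sorted(rev_ids)
finally:
    repo.unlock()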
|
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
1878 |
|
2708.1.7
by Aaron Bentley
Rename extract_files_bytes to iter_files_bytes |
1879 |
def iter_files_bytes(self, desired_files): |
2708.1.9
by Aaron Bentley
Clean-up docs and imports |
1880 |
"""Iterate through file versions. |
1881 |
||
2708.1.10
by Aaron Bentley
Update docstrings |
1882 |
Files will not necessarily be returned in the order they occur in
|
1883 |
desired_files. No specific order is guaranteed.
|
|
1884 |
||
2708.1.9
by Aaron Bentley
Clean-up docs and imports |
1885 |
Yields pairs of identifier, bytes_iterator. identifier is an opaque
|
2708.1.10
by Aaron Bentley
Update docstrings |
1886 |
value supplied by the caller as part of desired_files. It should
|
1887 |
uniquely identify the file version in the caller's context. (Examples:
|
|
1888 |
an index number or a TreeTransform trans_id.)
|
|
1889 |
||
1890 |
bytes_iterator is an iterable of bytestrings for the file. The
|
|
1891 |
kind of iterable and length of the bytestrings are unspecified, but for
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1892 |
this implementation, it is a list of bytes produced by
|
1893 |
VersionedFile.get_record_stream().
|
|
2708.1.10
by Aaron Bentley
Update docstrings |
1894 |
|
2708.1.9
by Aaron Bentley
Clean-up docs and imports |
1895 |
:param desired_files: a list of (file_id, revision_id, identifier)
|
2708.1.10
by Aaron Bentley
Update docstrings |
1896 |
triples
|
2708.1.9
by Aaron Bentley
Clean-up docs and imports |
1897 |
"""
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1898 |
text_keys = {} |
2708.1.3
by Aaron Bentley
Implement extract_files_bytes on Repository |
1899 |
for file_id, revision_id, callable_data in desired_files: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
1900 |
text_keys[(file_id, revision_id)] = callable_data |
1901 |
for record in self.texts.get_record_stream(text_keys, 'unordered', True): |
|
1902 |
if record.storage_kind == 'absent': |
|
1903 |
raise errors.RevisionNotPresent(record.key, self) |
|
4202.1.1
by John Arbash Meinel
Update Repository.iter_files_bytes() to return an iterable of bytestrings. |
1904 |
yield text_keys[record.key], record.get_bytes_as('chunked') |
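A sketch showing how the opaque identifier ties results back to requests, since no ordering is guaranteed; file and revision ids are placeholders:

wanted = [('file-id-1', 'rev-a', 'key-1'),
          ('file-id-2', 'rev-b', 'key-2')]
texts = {}
repo.lock_read()
try:
    for identifier, bytes_iter in repo.iter_files_bytes(wanted):
        texts[identifier] = ''.join(bytes_iter)
finally:
    repo.unlock()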
2708.1.3
by Aaron Bentley
Implement extract_files_bytes on Repository |
1905 |
|
3063.2.1
by Robert Collins
Solve reconciling erroring when multiple portions of a single delta chain are being reinserted. |
1906 |
def _generate_text_key_index(self, text_key_references=None, |
1907 |
ancestors=None): |
|
2988.1.3
by Robert Collins
Add a new repositoy method _generate_text_key_index for use by reconcile/check. |
1908 |
"""Generate a new text key index for the repository. |
1909 |
||
1910 |
This is an expensive function that will take considerable time to run.
|
|
1911 |
||
1912 |
:return: A dict mapping text keys ((file_id, revision_id) tuples) to a
|
|
1913 |
list of parents, also text keys. When a given key has no parents,
|
|
1914 |
the parents list will be [NULL_REVISION].
|
|
1915 |
"""
|
|
1916 |
# All revisions, to find inventory parents.
|
|
3063.2.1
by Robert Collins
Solve reconciling erroring when multiple portions of a single delta chain are being reinserted. |
1917 |
if ancestors is None: |
3287.6.1
by Robert Collins
* ``VersionedFile.get_graph`` is deprecated, with no replacement method. |
1918 |
graph = self.get_graph() |
1919 |
ancestors = graph.get_parent_map(self.all_revision_ids()) |
|
2951.2.9
by Robert Collins
* ``pack-0.92`` repositories can now be reconciled. |
1920 |
if text_key_references is None: |
1921 |
text_key_references = self.find_text_key_references() |
|
2988.3.1
by Robert Collins
Handle the progress bar in _generate_text_key_index correctly. |
1922 |
pb = ui.ui_factory.nested_progress_bar() |
1923 |
try: |
|
1924 |
return self._do_generate_text_key_index(ancestors, |
|
1925 |
text_key_references, pb) |
|
1926 |
finally: |
|
1927 |
pb.finished() |
|
1928 |
||
1929 |
def _do_generate_text_key_index(self, ancestors, text_key_references, pb): |
|
1930 |
"""Helper for _generate_text_key_index to avoid deep nesting.""" |
|
2988.1.3
by Robert Collins
Add a new repositoy method _generate_text_key_index for use by reconcile/check. |
1931 |
revision_order = tsort.topo_sort(ancestors) |
1932 |
invalid_keys = set() |
|
1933 |
revision_keys = {} |
|
1934 |
for revision_id in revision_order: |
|
1935 |
revision_keys[revision_id] = set() |
|
1936 |
text_count = len(text_key_references) |
|
1937 |
# a cache of the text keys to allow reuse; costs a dict of all the
|
|
1938 |
# keys, but saves a 2-tuple for every child of a given key.
|
|
1939 |
text_key_cache = {} |
|
1940 |
for text_key, valid in text_key_references.iteritems(): |
|
1941 |
if not valid: |
|
1942 |
invalid_keys.add(text_key) |
|
1943 |
else: |
|
1944 |
revision_keys[text_key[1]].add(text_key) |
|
1945 |
text_key_cache[text_key] = text_key |
|
1946 |
del text_key_references |
|
1947 |
text_index = {} |
|
1948 |
text_graph = graph.Graph(graph.DictParentsProvider(text_index)) |
|
1949 |
NULL_REVISION = _mod_revision.NULL_REVISION |
|
2988.1.5
by Robert Collins
Use a LRU cache when generating the text index to reduce inventory deserialisations. |
1950 |
# Set a cache with a size of 10 - this suffices for bzr.dev but may be
|
1951 |
# too small for large or very branchy trees. However, for 55K path
|
|
1952 |
# trees, it would be easy to use too much memory trivially. Ideally we
|
|
1953 |
# could gauge this by looking at available real memory etc, but this is
|
|
1954 |
# always a tricky proposition.
|
|
1955 |
inventory_cache = lru_cache.LRUCache(10) |
|
2988.1.3
by Robert Collins
Add a new repositoy method _generate_text_key_index for use by reconcile/check. |
1956 |
batch_size = 10 # should be ~150MB on a 55K path tree |
1957 |
batch_count = len(revision_order) / batch_size + 1 |
|
1958 |
processed_texts = 0 |
|
4103.3.2
by Martin Pool
Remove trailing punctuation from progress messages |
1959 |
pb.update("Calculating text parents", processed_texts, text_count) |
2988.1.3
by Robert Collins
Add a new repositoy method _generate_text_key_index for use by reconcile/check. |
1960 |
for offset in xrange(batch_count): |
1961 |
to_query = revision_order[offset * batch_size:(offset + 1) * |
|
1962 |
batch_size] |
|
1963 |
if not to_query: |
|
1964 |
break
|
|
1965 |
for rev_tree in self.revision_trees(to_query): |
|
1966 |
revision_id = rev_tree.get_revision_id() |
|
1967 |
parent_ids = ancestors[revision_id] |
|
1968 |
for text_key in revision_keys[revision_id]: |
|
4103.3.2
by Martin Pool
Remove trailing punctuation from progress messages |
1969 |
pb.update("Calculating text parents", processed_texts) |
2988.1.3
by Robert Collins
Add a new repositoy method _generate_text_key_index for use by reconcile/check. |
1970 |
processed_texts += 1 |
1971 |
candidate_parents = [] |
|
1972 |
for parent_id in parent_ids: |
|
1973 |
parent_text_key = (text_key[0], parent_id) |
|
1974 |
try: |
|
1975 |
check_parent = parent_text_key not in \ |
|
1976 |
revision_keys[parent_id] |
|
1977 |
except KeyError: |
|
1978 |
# the parent parent_id is a ghost:
|
|
1979 |
check_parent = False |
|
1980 |
# truncate the derived graph against this ghost.
|
|
1981 |
parent_text_key = None |
|
1982 |
if check_parent: |
|
1983 |
# look at the parent commit details inventories to
|
|
1984 |
# determine possible candidates in the per file graph.
|
|
1985 |
# TODO: cache here.
|
|
2988.1.5
by Robert Collins
Use a LRU cache when generating the text index to reduce inventory deserialisations. |
1986 |
try: |
1987 |
inv = inventory_cache[parent_id] |
|
1988 |
except KeyError: |
|
1989 |
inv = self.revision_tree(parent_id).inventory |
|
1990 |
inventory_cache[parent_id] = inv |
|
1991 |
parent_entry = inv._byid.get(text_key[0], None) |
|
2988.1.3
by Robert Collins
Add a new repositoy method _generate_text_key_index for use by reconcile/check. |
1992 |
if parent_entry is not None: |
1993 |
parent_text_key = ( |
|
1994 |
text_key[0], parent_entry.revision) |
|
1995 |
else: |
|
1996 |
parent_text_key = None |
|
1997 |
if parent_text_key is not None: |
|
1998 |
candidate_parents.append( |
|
1999 |
text_key_cache[parent_text_key]) |
|
2000 |
parent_heads = text_graph.heads(candidate_parents) |
|
2001 |
new_parents = list(parent_heads) |
|
2002 |
new_parents.sort(key=lambda x:candidate_parents.index(x)) |
|
2003 |
if new_parents == []: |
|
2004 |
new_parents = [NULL_REVISION] |
|
2005 |
text_index[text_key] = new_parents |
|
2006 |
||
2007 |
for text_key in invalid_keys: |
|
2008 |
text_index[text_key] = [NULL_REVISION] |
|
2009 |
return text_index |
|
2010 |
||
2668.2.8
by Andrew Bennetts
Rename get_data_to_fetch_for_revision_ids as item_keys_introduced_by. |
2011 |
def item_keys_introduced_by(self, revision_ids, _files_pb=None): |
2012 |
"""Get an iterable listing the keys of all the data introduced by a set |
|
2013 |
of revision IDs.
|
|
2014 |
||
2015 |
The keys will be ordered so that the corresponding items can be safely
|
|
2016 |
fetched and inserted in that order.
|
|
2017 |
||
2018 |
:returns: An iterable producing tuples of (knit-kind, file-id,
|
|
2019 |
versions). knit-kind is one of 'file', 'inventory', 'signatures',
|
|
2020 |
'revisions'. file-id is None unless knit-kind is 'file'.
|
|
2535.3.6
by Andrew Bennetts
Move some "what repo data to fetch logic" from RepoFetcher to Repository. |
2021 |
"""
|
2022 |
# XXX: it's a bit weird to control the inventory weave caching in this
|
|
2535.3.7
by Andrew Bennetts
Remove now unused _fetch_weave_texts, make progress reporting closer to how it was before I refactored __fetch. |
2023 |
# generator. Ideally the caching would be done in fetch.py I think. Or
|
2024 |
# maybe this generator should explicitly have the contract that it
|
|
2025 |
# should not be iterated until the previously yielded item has been
|
|
2026 |
# processed?
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2027 |
inv_w = self.inventories |
2535.3.6
by Andrew Bennetts
Move some "what repo data to fetch logic" from RepoFetcher to Repository. |
2028 |
|
2029 |
# file ids that changed
|
|
3422.1.1
by John Arbash Meinel
merge in bzr-1.5rc1, revert the transaction cache change |
2030 |
file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w) |
2535.3.8
by Andrew Bennetts
Unbreak progress reporting. |
2031 |
count = 0 |
2032 |
num_file_ids = len(file_ids) |
|
2535.3.6
by Andrew Bennetts
Move some "what repo data to fetch logic" from RepoFetcher to Repository. |
2033 |
for file_id, altered_versions in file_ids.iteritems(): |
2668.2.8
by Andrew Bennetts
Rename get_data_to_fetch_for_revision_ids as item_keys_introduced_by. |
2034 |
if _files_pb is not None: |
2035 |
_files_pb.update("fetch texts", count, num_file_ids) |
|
2535.3.8
by Andrew Bennetts
Unbreak progress reporting. |
2036 |
count += 1 |
2535.3.6
by Andrew Bennetts
Move some "what repo data to fetch logic" from RepoFetcher to Repository. |
2037 |
yield ("file", file_id, altered_versions) |
2535.3.9
by Andrew Bennetts
More comments. |
2038 |
# We're done with the files_pb. Note that it is finished by the caller,
|
2039 |
# just as it was created by the caller.
|
|
2668.2.8
by Andrew Bennetts
Rename get_data_to_fetch_for_revision_ids as item_keys_introduced_by. |
2040 |
del _files_pb |
2535.3.6
by Andrew Bennetts
Move some "what repo data to fetch logic" from RepoFetcher to Repository. |
2041 |
|
2042 |
# inventory
|
|
2043 |
yield ("inventory", None, revision_ids) |
|
2044 |
||
2045 |
# signatures
|
|
3825.5.2
by Andrew Bennetts
Ensure that item_keys_introduced_by returns the |
2046 |
# XXX: Note ATM no callers actually pay attention to this return;
|
2047 |
# instead they just use the list of revision ids and ignore
|
|
2048 |
# missing sigs. Consider removing this work entirely
|
|
2049 |
revisions_with_signatures = set(self.signatures.get_parent_map( |
|
2050 |
[(r,) for r in revision_ids])) |
|
3825.5.1
by Andrew Bennetts
Improve determining signatures to transfer in item_keys_introduced_by. |
2051 |
revisions_with_signatures = set( |
3825.5.2
by Andrew Bennetts
Ensure that item_keys_introduced_by returns the |
2052 |
[r for (r,) in revisions_with_signatures]) |
3825.5.1
by Andrew Bennetts
Improve determining signatures to transfer in item_keys_introduced_by. |
2053 |
revisions_with_signatures.intersection_update(revision_ids) |
2535.3.25
by Andrew Bennetts
Fetch signatures too. |
2054 |
yield ("signatures", None, revisions_with_signatures) |
2535.3.6
by Andrew Bennetts
Move some "what repo data to fetch logic" from RepoFetcher to Repository. |
2055 |
|
2056 |
# revisions
|
|
2057 |
yield ("revisions", None, revision_ids) |
|
2058 |
||
1185.65.27
by Robert Collins
Tweak storage towards mergability. |
2059 |
@needs_read_lock
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2060 |
def get_inventory(self, revision_id): |
3169.2.1
by Robert Collins
New method ``iter_inventories`` on Repository for access to many |
2061 |
"""Get Inventory object by revision id.""" |
2062 |
return self.iter_inventories([revision_id]).next() |
|
2063 |
||
2064 |
def iter_inventories(self, revision_ids): |
|
2065 |
"""Get many inventories by revision_ids. |
|
2066 |
||
2067 |
This will buffer some or all of the texts used in constructing the
|
|
2068 |
inventories in memory, but will only parse a single inventory at a
|
|
2069 |
time.
|
|
2070 |
||
4202.2.1
by Ian Clatworthy
get directory logging working again |
2071 |
:param revision_ids: The expected revision ids of the inventories.
|
3169.2.1
by Robert Collins
New method ``iter_inventories`` on Repository for access to many |
2072 |
:return: An iterator of inventories.
|
2073 |
"""
|
|
3376.2.4
by Martin Pool
Remove every assert statement from bzrlib! |
2074 |
if ((None in revision_ids) |
2075 |
or (_mod_revision.NULL_REVISION in revision_ids)): |
|
2076 |
raise ValueError('cannot get null revision inventory') |
|
3169.2.1
by Robert Collins
New method ``iter_inventories`` on Repository for access to many |
2077 |
return self._iter_inventories(revision_ids) |
2078 |
||
2079 |
def _iter_inventories(self, revision_ids): |
|
2080 |
"""single-document based inventory iteration.""" |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2081 |
for text, revision_id in self._iter_inventory_xmls(revision_ids): |
3169.2.1
by Robert Collins
New method ``iter_inventories`` on Repository for access to many |
2082 |
yield self.deserialise_inventory(revision_id, text) |
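A sketch of iter_inventories(), which buffers texts but parses only one inventory at a time; the ids are placeholders:

repo.lock_read()
try:
    for inv in repo.iter_inventories(['rev-a', 'rev-b']):
        print inv.revision_id, len(list(inv.iter_entries()))
finally:
    repo.unlock()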
1740.2.3
by Aaron Bentley
Only reserialize the working tree basis inventory when needed. |
2083 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2084 |
def _iter_inventory_xmls(self, revision_ids): |
2085 |
keys = [(revision_id,) for revision_id in revision_ids] |
|
2086 |
stream = self.inventories.get_record_stream(keys, 'unordered', True) |
|
3890.2.3
by John Arbash Meinel
Use the 'chunked' interface to keep memory consumption minimal during revision_trees() |
2087 |
text_chunks = {} |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2088 |
for record in stream: |
2089 |
if record.storage_kind != 'absent': |
|
3890.2.3
by John Arbash Meinel
Use the 'chunked' interface to keep memory consumption minimal during revision_trees() |
2090 |
text_chunks[record.key] = record.get_bytes_as('chunked') |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2091 |
else: |
2092 |
raise errors.NoSuchRevision(self, record.key) |
|
2093 |
for key in keys: |
|
3890.2.3
by John Arbash Meinel
Use the 'chunked' interface to keep memory consumption minimal during revision_trees() |
2094 |
chunks = text_chunks.pop(key) |
2095 |
yield ''.join(chunks), key[-1] |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2096 |
|
1740.2.3
by Aaron Bentley
Only reserialize the working tree basis inventory when needed. |
2097 |
def deserialise_inventory(self, revision_id, xml): |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2098 |
"""Transform the xml into an inventory object. |
1740.2.3
by Aaron Bentley
Only reserialize the working tree basis inventory when needed. |
2099 |
|
2100 |
:param revision_id: The expected revision id of the inventory.
|
|
2101 |
:param xml: A serialised inventory.
|
|
2102 |
"""
|
|
3882.6.23
by John Arbash Meinel
Change the XMLSerializer.read_inventory_from_string api. |
2103 |
result = self._serializer.read_inventory_from_string(xml, revision_id, |
2104 |
entry_cache=self._inventory_entry_cache) |
|
3169.2.3
by Robert Collins
Use an if, not an assert, as we test with -O. |
2105 |
if result.revision_id != revision_id: |
2106 |
raise AssertionError('revision id mismatch %s != %s' % ( |
|
2107 |
result.revision_id, revision_id)) |
|
3169.2.2
by Robert Collins
Add a test to Repository.deserialise_inventory that the resulting ivnentory is the one asked for, and update relevant tests. Also tweak the model 1 to 2 regenerate inventories logic to use the revision trees parent marker which is more accurate in some cases. |
2108 |
return result |
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2109 |
|
1910.2.22
by Aaron Bentley
Make commits preserve root entry data |
2110 |
def serialise_inventory(self, inv): |
1910.2.48
by Aaron Bentley
Update from review comments |
2111 |
return self._serializer.write_inventory_to_string(inv) |
1910.2.22
by Aaron Bentley
Make commits preserve root entry data |
2112 |
|
2817.2.1
by Robert Collins
* Inventory serialisation no longer double-sha's the content. |
2113 |
def _serialise_inventory_to_lines(self, inv): |
2114 |
return self._serializer.write_inventory_to_lines(inv) |
|
2115 |
||
2520.4.113
by Aaron Bentley
Avoid peeking at Repository._serializer |
2116 |
def get_serializer_format(self): |
2117 |
return self._serializer.format_num |
|
2118 |
||
1185.65.27
by Robert Collins
Tweak storage towards mergability. |
2119 |
@needs_read_lock
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2120 |
def get_inventory_xml(self, revision_id): |
2121 |
"""Get inventory XML as a file object.""" |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2122 |
texts = self._iter_inventory_xmls([revision_id]) |
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2123 |
try: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2124 |
text, revision_id = texts.next() |
2125 |
except StopIteration: |
|
1773.4.1
by Martin Pool
Add pyflakes makefile target; fix many warnings |
2126 |
raise errors.HistoryMissing(self, 'inventory', revision_id) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2127 |
return text |
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2128 |
|
1185.65.27
by Robert Collins
Tweak storage towards mergability. |
2129 |
@needs_read_lock
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2130 |
def get_inventory_sha1(self, revision_id): |
2131 |
"""Return the sha1 hash of the inventory entry |
|
2132 |
"""
|
|
2133 |
return self.get_revision(revision_id).inventory_sha1 |
|
2134 |
||
2230.3.54
by Aaron Bentley
Move reverse history iteration to repository |
2135 |
def iter_reverse_revision_history(self, revision_id): |
2136 |
"""Iterate backwards through revision ids in the lefthand history |
|
2137 |
||
2138 |
:param revision_id: The revision id to start with. All its lefthand
|
|
2139 |
ancestors will be traversed.
|
|
2140 |
"""
|
|
3287.5.2
by Robert Collins
Deprecate VersionedFile.get_parents, breaking pulling from a ghost containing knit or pack repository to weaves, which improves correctness and allows simplification of core code. |
2141 |
graph = self.get_graph() |
2230.3.54
by Aaron Bentley
Move reverse history iteration to repository |
2142 |
next_id = revision_id |
2143 |
while True: |
|
3287.5.2
by Robert Collins
Deprecate VersionedFile.get_parents, breaking pulling from a ghost containing knit or pack repository to weaves, which improves correctness and allows simplification of core code. |
2144 |
if next_id in (None, _mod_revision.NULL_REVISION): |
2145 |
return
|
|
2230.3.54
by Aaron Bentley
Move reverse history iteration to repository |
2146 |
yield next_id |
3287.5.10
by Robert Collins
Note iter_reverse_revision_history exception decision. |
2147 |
# Note: The following line may raise KeyError in the event of
|
2148 |
# truncated history. We decided not to have a try:except:raise
|
|
2149 |
# RevisionNotPresent here until we see a use for it, because of the
|
|
2150 |
# cost in an inner loop that is by its very nature O(history).
|
|
2151 |
# Robert Collins 20080326
|
|
3287.5.2
by Robert Collins
Deprecate VersionedFile.get_parents, breaking pulling from a ghost containing knit or pack repository to weaves, which improves correctness and allows simplification of core code. |
2152 |
parents = graph.get_parent_map([next_id])[next_id] |
2230.3.54
by Aaron Bentley
Move reverse history iteration to repository |
2153 |
if len(parents) == 0: |
2154 |
return
|
|
2155 |
else: |
|
2156 |
next_id = parents[0] |
|
2157 |
||
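A minimal usage sketch for the generator above (the names `repo` and `tip_revision_id` are assumptions, standing for a read-locked Repository and a revision id it contains):

    # Collect the lefthand (mainline) ancestry of a tip, newest first.
    mainline = []
    for revision_id in repo.iter_reverse_revision_history(tip_revision_id):
        mainline.append(revision_id)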
1594.2.3
by Robert Collins
bugfix revision.MultipleRevisionSources.get_revision_graph to integrate ghosts between sources. [slow on weaves, fast on knits. |
2158 |
@needs_read_lock
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2159 |
def get_revision_inventory(self, revision_id): |
2160 |
"""Return inventory of a past revision.""" |
|
2161 |
# TODO: Unify this with get_inventory()
|
|
2162 |
# bzr 0.0.6 and later imposes the constraint that the inventory_id
|
|
2163 |
# must be the same as its revision, so this is trivial.
|
|
1534.4.28
by Robert Collins
first cut at merge from integration. |
2164 |
if revision_id is None: |
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2165 |
# This does not make sense: if there is no revision,
|
2166 |
# then it is the current tree inventory surely ?!
|
|
2167 |
# and thus get_root_id() is something that looks at the last
|
|
2168 |
# commit on the branch, and the get_root_id is an inventory check.
|
|
2169 |
raise NotImplementedError |
|
2170 |
# return Inventory(self.get_root_id())
|
|
2171 |
else: |
|
2172 |
return self.get_inventory(revision_id) |
|
2173 |
||
1534.6.3
by Robert Collins
find_repository sufficiently robust. |
2174 |
def is_shared(self): |
2175 |
"""Return True if this repository is flagged as a shared repository.""" |
|
1596.2.12
by Robert Collins
Merge and make Knit Repository use the revision store for all possible queries. |
2176 |
raise NotImplementedError(self.is_shared) |
1534.6.3
by Robert Collins
find_repository sufficiently robust. |
2177 |
|
1594.2.7
by Robert Collins
Add versionedfile.fix_parents api for correcting data post hoc. |
2178 |
@needs_write_lock
|
1692.1.1
by Robert Collins
* Repository.reconcile now takes a thorough keyword parameter to allow |
2179 |
def reconcile(self, other=None, thorough=False): |
1594.2.7
by Robert Collins
Add versionedfile.fix_parents api for correcting data post hoc. |
2180 |
"""Reconcile this repository.""" |
2181 |
from bzrlib.reconcile import RepoReconciler |
|
1692.1.1
by Robert Collins
* Repository.reconcile now takes a thorough keyword parameter to allow |
2182 |
reconciler = RepoReconciler(self, thorough=thorough) |
1594.2.7
by Robert Collins
Add versionedfile.fix_parents api for correcting data post hoc. |
2183 |
reconciler.reconcile() |
2184 |
return reconciler |
|
2440.1.1
by Martin Pool
Add new Repository.sprout, |
2185 |
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
2186 |
def _refresh_data(self): |
2187 |
"""Helper called from lock_* to ensure coherency with disk. |
|
2188 |
||
2189 |
The default implementation does nothing; it is however possible
|
|
2190 |
for repositories to maintain loaded indices across multiple locks
|
|
2191 |
by checking inside their implementation of this method to see
|
|
2192 |
whether their indices are still valid. This depends of course on
|
|
4145.1.2
by Robert Collins
Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances. |
2193 |
the disk format being validatable in this manner. This method is
|
2194 |
also called by the refresh_data() public interface to cause a refresh
|
|
2195 |
to occur while in a write lock so that data inserted by a smart server
|
|
2196 |
push operation is visible on the client's instance of the physical
|
|
2197 |
repository.
|
|
2617.6.2
by Robert Collins
Add abort_write_group and wire write_groups into fetch and commit. |
2198 |
"""
|
2199 |
||
1534.6.3
by Robert Collins
find_repository sufficiently robust. |
2200 |
@needs_read_lock
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2201 |
def revision_tree(self, revision_id): |
2202 |
"""Return Tree for a revision on this branch. |
|
2203 |
||
3668.5.2
by Jelmer Vernooij
Fix docstring. |
2204 |
`revision_id` may be NULL_REVISION for the empty tree revision.
|
1852.5.1
by Robert Collins
Deprecate EmptyTree in favour of using Repository.revision_tree. |
2205 |
"""
|
3668.5.1
by Jelmer Vernooij
Use NULL_REVISION rather than None for Repository.revision_tree(). |
2206 |
revision_id = _mod_revision.ensure_null(revision_id) |
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2207 |
# TODO: refactor this to use an existing revision object
|
2208 |
# so we don't need to read it in twice.
|
|
3668.5.1
by Jelmer Vernooij
Use NULL_REVISION rather than None for Repository.revision_tree(). |
2209 |
if revision_id == _mod_revision.NULL_REVISION: |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2210 |
return RevisionTree(self, Inventory(root_id=None), |
1731.1.61
by Aaron Bentley
Merge bzr.dev |
2211 |
_mod_revision.NULL_REVISION) |
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2212 |
else: |
2213 |
inv = self.get_revision_inventory(revision_id) |
|
1185.65.17
by Robert Collins
Merge from integration, mode-changes are broken. |
2214 |
return RevisionTree(self, inv, revision_id) |
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2215 |
|
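As a hedged illustration of the NULL_REVISION behaviour documented above (`repo` and `some_revision_id` are assumed names, not part of this module):

    from bzrlib.revision import NULL_REVISION

    empty_tree = repo.revision_tree(NULL_REVISION)   # tree with an empty inventory
    tree = repo.revision_tree(some_revision_id)      # ordinary revision tree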
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
2216 |
def revision_trees(self, revision_ids): |
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
2217 |
"""Return Trees for revisions in this repository. |
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
2218 |
|
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
2219 |
:param revision_ids: a sequence of revision-ids;
|
2220 |
a revision-id may not be None or 'null:'
|
|
2221 |
"""
|
|
3169.2.1
by Robert Collins
New method ``iter_inventories`` on Repository for access to many |
2222 |
inventories = self.iter_inventories(revision_ids) |
2223 |
for inv in inventories: |
|
2224 |
yield RevisionTree(self, inv, inv.revision_id) |
|
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
2225 |
|
4137.3.2
by Ian Clatworthy
Repository.get_deltas_for_revisions() now supports file-id filtering |
2226 |
def _filtered_revision_trees(self, revision_ids, file_ids): |
2227 |
"""Return Tree for a revision on this branch with only some files. |
|
2228 |
||
2229 |
:param revision_ids: a sequence of revision-ids;
|
|
2230 |
a revision-id may not be None or 'null:'
|
|
2231 |
:param file_ids: if not None, the result is filtered
|
|
2232 |
so that only those file-ids, their parents and their
|
|
2233 |
children are included.
|
|
2234 |
"""
|
|
2235 |
inventories = self.iter_inventories(revision_ids) |
|
2236 |
for inv in inventories: |
|
2237 |
# Should we introduce a FilteredRevisionTree class rather
|
|
2238 |
# than pre-filter the inventory here?
|
|
2239 |
filtered_inv = inv.filter(file_ids) |
|
2240 |
yield RevisionTree(self, filtered_inv, filtered_inv.revision_id) |
|
2241 |
||
1756.3.3
by Aaron Bentley
More refactoring, introduce revision_trees. |
2242 |
@needs_read_lock
|
2530.1.1
by Aaron Bentley
Make topological sorting optional for get_ancestry |
2243 |
def get_ancestry(self, revision_id, topo_sorted=True): |
1185.66.2
by Aaron Bentley
Moved get_ancestry to RevisionStorage |
2244 |
"""Return a list of revision-ids integrated by a revision. |
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
2245 |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2246 |
The first element of the list is always None, indicating the origin
|
2247 |
revision. This might change when we have history horizons, or
|
|
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
2248 |
perhaps we should have a new API.
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2249 |
|
1185.66.2
by Aaron Bentley
Moved get_ancestry to RevisionStorage |
2250 |
This is topologically sorted.
|
2251 |
"""
|
|
2598.5.1
by Aaron Bentley
Start eliminating the use of None to indicate null revision |
2252 |
if _mod_revision.is_null(revision_id): |
1185.66.2
by Aaron Bentley
Moved get_ancestry to RevisionStorage |
2253 |
return [None] |
1534.4.41
by Robert Collins
Branch now uses BzrDir reasonably sanely. |
2254 |
if not self.has_revision(revision_id): |
2255 |
raise errors.NoSuchRevision(self, revision_id) |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2256 |
graph = self.get_graph() |
2257 |
keys = set() |
|
2258 |
search = graph._make_breadth_first_searcher([revision_id]) |
|
2259 |
while True: |
|
2260 |
try: |
|
2261 |
found, ghosts = search.next_with_ghosts() |
|
2262 |
except StopIteration: |
|
2263 |
break
|
|
2264 |
keys.update(found) |
|
2265 |
if _mod_revision.NULL_REVISION in keys: |
|
2266 |
keys.remove(_mod_revision.NULL_REVISION) |
|
2267 |
if topo_sorted: |
|
2268 |
parent_map = graph.get_parent_map(keys) |
|
2269 |
keys = tsort.topo_sort(parent_map) |
|
2270 |
return [None] + list(keys) |
|
1185.66.2
by Aaron Bentley
Moved get_ancestry to RevisionStorage |
2271 |
|
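A sketch of the documented return shape, assuming `repo` is a read-locked repository that contains `revision_id`:

    ancestry = repo.get_ancestry(revision_id)            # topologically sorted
    # The first element is always None (the origin marker described above).
    real_ancestry = ancestry[1:]
    unsorted = repo.get_ancestry(revision_id, topo_sorted=False)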
2604.2.1
by Robert Collins
(robertc) Introduce a pack command. |
2272 |
def pack(self): |
2273 |
"""Compress the data within the repository. |
|
2274 |
||
2275 |
This operation only makes sense for some repository types. For other
|
|
2276 |
types it should be a no-op that just returns.
|
|
2277 |
||
2278 |
This stub method does not require a lock, but subclasses should use
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2279 |
@needs_write_lock, as this is a long-running call and it is reasonable to
|
2604.2.1
by Robert Collins
(robertc) Introduce a pack command. |
2280 |
implicitly lock for the user.
|
2281 |
"""
|
|
2282 |
||
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2283 |
def get_transaction(self): |
2284 |
return self.control_files.get_transaction() |
|
2285 |
||
3517.4.17
by Martin Pool
Redo base Repository.get_parent_map to use .revisions graph |
2286 |
def get_parent_map(self, revision_ids): |
3099.3.1
by John Arbash Meinel
Implement get_parent_map for ParentProviders |
2287 |
"""See graph._StackedParentsProvider.get_parent_map""" |
3517.4.17
by Martin Pool
Redo base Repository.get_parent_map to use .revisions graph |
2288 |
# revisions index works in keys; this just works in revisions
|
2289 |
# therefore wrap and unwrap
|
|
2290 |
query_keys = [] |
|
2291 |
result = {} |
|
2292 |
for revision_id in revision_ids: |
|
2293 |
if revision_id == _mod_revision.NULL_REVISION: |
|
2294 |
result[revision_id] = () |
|
2295 |
elif revision_id is None: |
|
3373.5.2
by John Arbash Meinel
Add repository_implementation tests for get_parent_map |
2296 |
raise ValueError('get_parent_map(None) is not valid') |
3517.4.17
by Martin Pool
Redo base Repository.get_parent_map to use .revisions graph |
2297 |
else: |
2298 |
query_keys.append((revision_id,)) |
|
2299 |
for ((revision_id,), parent_keys) in \ |
|
2300 |
self.revisions.get_parent_map(query_keys).iteritems(): |
|
2301 |
if parent_keys: |
|
2302 |
result[revision_id] = tuple(parent_revid |
|
2303 |
for (parent_revid,) in parent_keys) |
|
2304 |
else: |
|
2305 |
result[revision_id] = (_mod_revision.NULL_REVISION,) |
|
2306 |
return result |
|
2490.2.13
by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept |
2307 |
|
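A hedged sketch of the calling convention implemented above: plain revision ids go in, tuples of parent revision ids come out, ghosts are simply absent from the result, and parentless revisions map to (NULL_REVISION,). The names `repo`, `revid_a` and `revid_b` are assumptions:

    from bzrlib.revision import NULL_REVISION

    parent_map = repo.get_parent_map([revid_a, revid_b])
    for revision_id, parents in parent_map.iteritems():
        if parents == (NULL_REVISION,):
            pass  # an origin revision with no parents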
2308 |
def _make_parents_provider(self): |
|
2309 |
return self |
|
2310 |
||
2490.2.21
by Aaron Bentley
Rename graph to deprecated_graph |
2311 |
def get_graph(self, other_repository=None): |
2490.2.13
by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept |
2312 |
"""Return the graph walker for this repository format""" |
2313 |
parents_provider = self._make_parents_provider() |
|
2490.2.14
by Aaron Bentley
Avoid StackedParentsProvider when underlying repos match |
2314 |
if (other_repository is not None and |
3211.3.1
by Jelmer Vernooij
Use convenience function to check whether two repository handles are referring to the same repository. |
2315 |
not self.has_same_location(other_repository)): |
2490.2.21
by Aaron Bentley
Rename graph to deprecated_graph |
2316 |
parents_provider = graph._StackedParentsProvider( |
2490.2.13
by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept |
2317 |
[parents_provider, other_repository._make_parents_provider()]) |
2490.2.22
by Aaron Bentley
Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher |
2318 |
return graph.Graph(parents_provider) |
2490.2.13
by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept |
2319 |
|
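A small usage sketch for get_graph(), under the assumption that `repo` and `other_repo` are open repositories and the two revision ids exist in one of them:

    g = repo.get_graph(other_repository=other_repo)
    # heads() keeps only the revisions that are not ancestors of another in the set.
    heads = g.heads([revid_a, revid_b])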
4145.2.1
by Ian Clatworthy
faster check |
2320 |
def _get_versioned_file_checker(self, text_key_references=None): |
2321 |
"""Return an object suitable for checking versioned files. |
|
2322 |
|
|
2323 |
:param text_key_references: if non-None, an already built
|
|
2324 |
dictionary mapping text keys ((fileid, revision_id) tuples)
|
|
2325 |
to whether they were referred to by the inventory of the
|
|
2326 |
revision_id that they contain. If None, this will be
|
|
2327 |
calculated.
|
|
2328 |
"""
|
|
2329 |
return _VersionedFileChecker(self, |
|
2330 |
text_key_references=text_key_references) |
|
2745.6.47
by Andrew Bennetts
Move check_parents out of VersionedFile. |
2331 |
|
3184.1.9
by Robert Collins
* ``Repository.get_data_stream`` is now deprecated in favour of |
2332 |
def revision_ids_to_search_result(self, result_set): |
2333 |
"""Convert a set of revision ids to a graph SearchResult.""" |
|
2334 |
result_parents = set() |
|
2335 |
for parents in self.get_graph().get_parent_map( |
|
2336 |
result_set).itervalues(): |
|
2337 |
result_parents.update(parents) |
|
2338 |
included_keys = result_set.intersection(result_parents) |
|
2339 |
start_keys = result_set.difference(included_keys) |
|
2340 |
exclude_keys = result_parents.difference(result_set) |
|
2341 |
result = graph.SearchResult(start_keys, exclude_keys, |
|
2342 |
len(result_set), result_set) |
|
2343 |
return result |
|
2344 |
||
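A minimal sketch of the conversion above (assumed names): a bare set of revision ids is turned into the start/exclude/count shape that the graph and fetch code work with:

    wanted = set([revid_a, revid_b])
    search_result = repo.revision_ids_to_search_result(wanted)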
1185.65.27
by Robert Collins
Tweak storage towards mergability. |
2345 |
@needs_write_lock
|
1534.6.5
by Robert Collins
Cloning of repos preserves shared and make-working-tree attributes. |
2346 |
def set_make_working_trees(self, new_value): |
2347 |
"""Set the policy flag for making working trees when creating branches. |
|
2348 |
||
2349 |
This only applies to branches that use this repository.
|
|
2350 |
||
2351 |
The default is 'True'.
|
|
2352 |
:param new_value: True to restore the default, False to disable making
|
|
2353 |
working trees.
|
|
2354 |
"""
|
|
1596.2.12
by Robert Collins
Merge and make Knit Repository use the revision store for all possible queries. |
2355 |
raise NotImplementedError(self.set_make_working_trees) |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2356 |
|
1534.6.5
by Robert Collins
Cloning of repos preserves shared and make-working-tree attributes. |
2357 |
def make_working_trees(self): |
2358 |
"""Returns the policy for making working trees on new branches.""" |
|
1596.2.12
by Robert Collins
Merge and make Knit Repository use the revision store for all possible queries. |
2359 |
raise NotImplementedError(self.make_working_trees) |
1534.6.5
by Robert Collins
Cloning of repos preserves shared and make-working-tree attributes. |
2360 |
|
2361 |
@needs_write_lock
|
|
1185.65.1
by Aaron Bentley
Refactored out ControlFiles and RevisionStore from _Branch |
2362 |
def sign_revision(self, revision_id, gpg_strategy): |
2363 |
plaintext = Testament.from_revision(self, revision_id).as_short_text() |
|
2364 |
self.store_revision_signature(gpg_strategy, plaintext, revision_id) |
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2365 |
|
1563.2.29
by Robert Collins
Remove all but fetch references to repository.revision_store. |
2366 |
@needs_read_lock
|
2367 |
def has_signature_for_revision_id(self, revision_id): |
|
2368 |
"""Query for a revision signature for revision_id in the repository.""" |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2369 |
if not self.has_revision(revision_id): |
2370 |
raise errors.NoSuchRevision(self, revision_id) |
|
2371 |
sig_present = (1 == len( |
|
2372 |
self.signatures.get_parent_map([(revision_id,)]))) |
|
2373 |
return sig_present |
|
1563.2.29
by Robert Collins
Remove all but fetch references to repository.revision_store. |
2374 |
|
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
2375 |
@needs_read_lock
|
2376 |
def get_signature_text(self, revision_id): |
|
2377 |
"""Return the text for a signature.""" |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2378 |
stream = self.signatures.get_record_stream([(revision_id,)], |
2379 |
'unordered', True) |
|
2380 |
record = stream.next() |
|
2381 |
if record.storage_kind == 'absent': |
|
2382 |
raise errors.NoSuchRevision(self, revision_id) |
|
2383 |
return record.get_bytes_as('fulltext') |
|
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
2384 |
|
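A hedged sketch combining the two signature methods above (`repo` and `revision_id` are assumed; both calls raise NoSuchRevision for an unknown id):

    if repo.has_signature_for_revision_id(revision_id):
        signature = repo.get_signature_text(revision_id)  # full signed text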
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
2385 |
@needs_read_lock
|
2745.6.36
by Andrew Bennetts
Deprecate revision_ids arg to Repository.check and other tweaks. |
2386 |
def check(self, revision_ids=None): |
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
2387 |
"""Check consistency of all history of given revision_ids. |
2388 |
||
2389 |
Different repository implementations should override _check().
|
|
2390 |
||
2391 |
:param revision_ids: A non-empty list of revision_ids whose ancestry
|
|
2392 |
will be checked. Typically the last revision_id of a branch.
|
|
2393 |
"""
|
|
2394 |
return self._check(revision_ids) |
|
2395 |
||
2396 |
def _check(self, revision_ids): |
|
1773.4.1
by Martin Pool
Add pyflakes makefile target; fix many warnings |
2397 |
result = check.Check(self) |
1732.2.4
by Martin Pool
Split check into Branch.check and Repository.check |
2398 |
result.check() |
2399 |
return result |
|
2400 |
||
1904.2.3
by Martin Pool
Give a warning on access to old repository formats |
2401 |
def _warn_if_deprecated(self): |
1904.2.5
by Martin Pool
Fix format warning inside test suite and add test |
2402 |
global _deprecation_warning_done |
2403 |
if _deprecation_warning_done: |
|
2404 |
return
|
|
2405 |
_deprecation_warning_done = True |
|
1904.2.3
by Martin Pool
Give a warning on access to old repository formats |
2406 |
warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance" |
2407 |
% (self._format, self.bzrdir.transport.base)) |
|
2408 |
||
1910.2.63
by Aaron Bentley
Add supports_rich_root member to repository |
2409 |
def supports_rich_root(self): |
2410 |
return self._format.rich_root_data |
|
2411 |
||
2150.2.2
by Robert Collins
Change the commit builder selected-revision-id test to use a unicode revision id where possible, leading to stricter testing of the hypothetical unicode revision id support in bzr. |
2412 |
def _check_ascii_revisionid(self, revision_id, method): |
2413 |
"""Private helper for ascii-only repositories.""" |
|
2414 |
# weave repositories refuse to store revision ids that are non-ascii.
|
|
2415 |
if revision_id is not None: |
|
2416 |
# weaves require ascii revision ids.
|
|
2417 |
if isinstance(revision_id, unicode): |
|
2418 |
try: |
|
2419 |
revision_id.encode('ascii') |
|
2420 |
except UnicodeEncodeError: |
|
2421 |
raise errors.NonAsciiRevisionId(method, self) |
|
2249.5.12
by John Arbash Meinel
Change the APIs for VersionedFile, Store, and some of Repository into utf-8 |
2422 |
else: |
2423 |
try: |
|
2424 |
revision_id.decode('ascii') |
|
2425 |
except UnicodeDecodeError: |
|
2426 |
raise errors.NonAsciiRevisionId(method, self) |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2427 |
|
2819.2.4
by Andrew Bennetts
Add a 'revision_graph_can_have_wrong_parents' method to repository. |
2428 |
def revision_graph_can_have_wrong_parents(self): |
2429 |
"""Is it possible for this repository to have a revision graph with |
|
2430 |
incorrect parents?
|
|
2150.2.2
by Robert Collins
Change the commit builder selected-revision-id test to use a unicode revision id where possible, leading to stricter testing of the hypothetical unicode revision id support in bzr. |
2431 |
|
2819.2.4
by Andrew Bennetts
Add a 'revision_graph_can_have_wrong_parents' method to repository. |
2432 |
If True, then this repository must also implement
|
2433 |
_find_inconsistent_revision_parents so that check and reconcile can
|
|
2434 |
check for inconsistencies before proceeding with other checks that may
|
|
2435 |
depend on the revision index being consistent.
|
|
2436 |
"""
|
|
2437 |
raise NotImplementedError(self.revision_graph_can_have_wrong_parents) |
|
3184.1.9
by Robert Collins
* ``Repository.get_data_stream`` is now deprecated in favour of |
2438 |
|
2439 |
||
2241.1.18
by mbp at sourcefrog
Restore use of deprecating delegator for old formats in bzrlib.repository. |
2440 |
# remove these delegates a while after bzr 0.15
|
2441 |
def __make_delegated(name, from_module): |
|
2442 |
def _deprecated_repository_forwarder(): |
|
2443 |
symbol_versioning.warn('%s moved to %s in bzr 0.15' |
|
2444 |
% (name, from_module), |
|
2241.1.20
by mbp at sourcefrog
update tests for new locations of weave repos |
2445 |
DeprecationWarning, |
2446 |
stacklevel=2) |
|
2241.1.18
by mbp at sourcefrog
Restore use of deprecating delegator for old formats in bzrlib.repository. |
2447 |
m = __import__(from_module, globals(), locals(), [name]) |
2448 |
try: |
|
2449 |
return getattr(m, name) |
|
2450 |
except AttributeError: |
|
2451 |
raise AttributeError('module %s has no name %s' |
|
2452 |
% (m, name)) |
|
2453 |
globals()[name] = _deprecated_repository_forwarder |
|
2454 |
||
2455 |
for _name in [ |
|
2456 |
'AllInOneRepository', |
|
2457 |
'WeaveMetaDirRepository', |
|
2458 |
'PreSplitOutRepositoryFormat', |
|
2459 |
'RepositoryFormat4', |
|
2460 |
'RepositoryFormat5', |
|
2461 |
'RepositoryFormat6', |
|
2462 |
'RepositoryFormat7', |
|
2463 |
]:
|
|
2464 |
__make_delegated(_name, 'bzrlib.repofmt.weaverepo') |
|
2465 |
||
2466 |
for _name in [ |
|
2467 |
'KnitRepository', |
|
2468 |
'RepositoryFormatKnit', |
|
2469 |
'RepositoryFormatKnit1', |
|
2470 |
]:
|
|
2471 |
__make_delegated(_name, 'bzrlib.repofmt.knitrepo') |
|
2472 |
||
2473 |
||
2996.2.2
by Aaron Bentley
Create install_revisions function |
2474 |
def install_revision(repository, rev, revision_tree): |
2475 |
"""Install all revision data into a repository.""" |
|
2476 |
install_revisions(repository, [(rev, revision_tree, None)]) |
|
2477 |
||
2478 |
||
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
2479 |
def install_revisions(repository, iterable, num_revisions=None, pb=None): |
2996.2.4
by Aaron Bentley
Rename function to add_signature_text |
2480 |
"""Install all revision data into a repository. |
2481 |
||
2482 |
Accepts an iterable of revision, tree, signature tuples. The signature
|
|
2483 |
may be None.
|
|
2484 |
"""
|
|
2592.3.96
by Robert Collins
Merge index improvements (includes bzr.dev). |
2485 |
repository.start_write_group() |
2486 |
try: |
|
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
2487 |
for n, (revision, revision_tree, signature) in enumerate(iterable): |
2996.2.2
by Aaron Bentley
Create install_revisions function |
2488 |
_install_revision(repository, revision, revision_tree, signature) |
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
2489 |
if pb is not None: |
2490 |
pb.update('Transferring revisions', n + 1, num_revisions) |
|
2592.3.96
by Robert Collins
Merge index improvements (includes bzr.dev). |
2491 |
except: |
2492 |
repository.abort_write_group() |
|
2592.3.101
by Robert Collins
Correctly propogate exceptions from repository.install_revisions. |
2493 |
raise
|
2592.3.96
by Robert Collins
Merge index improvements (includes bzr.dev). |
2494 |
else: |
2495 |
repository.commit_write_group() |
|
2496 |
||
2497 |
||
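A usage sketch for install_revisions(), with assumed names; the target repository must already be write-locked, because the function opens a write group around the copy:

    install_revisions(target_repo,
        [(rev_a, tree_a, None), (rev_b, tree_b, None)],
        num_revisions=2, pb=progress_bar)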
2996.2.1
by Aaron Bentley
Add KnitRepositoryFormat4 |
2498 |
def _install_revision(repository, rev, revision_tree, signature): |
2592.3.96
by Robert Collins
Merge index improvements (includes bzr.dev). |
2499 |
"""Install all revision data into a repository.""" |
1185.82.84
by Aaron Bentley
Moved stuff around |
2500 |
present_parents = [] |
2501 |
parent_trees = {} |
|
2502 |
for p_id in rev.parent_ids: |
|
2503 |
if repository.has_revision(p_id): |
|
2504 |
present_parents.append(p_id) |
|
2505 |
parent_trees[p_id] = repository.revision_tree(p_id) |
|
2506 |
else: |
|
3668.5.1
by Jelmer Vernooij
Use NULL_REVISION rather than None for Repository.revision_tree(). |
2507 |
parent_trees[p_id] = repository.revision_tree( |
2508 |
_mod_revision.NULL_REVISION) |
|
1185.82.84
by Aaron Bentley
Moved stuff around |
2509 |
|
2510 |
inv = revision_tree.inventory |
|
1910.2.51
by Aaron Bentley
Bundles now corrupt repositories |
2511 |
entries = inv.iter_entries() |
2617.6.6
by Robert Collins
Some review feedback. |
2512 |
# backwards compatibility hack: skip the root id.
|
1910.2.63
by Aaron Bentley
Add supports_rich_root member to repository |
2513 |
if not repository.supports_rich_root(): |
1910.2.60
by Aaron Bentley
Ensure that new-model revisions aren't installed into old-model repos |
2514 |
path, root = entries.next() |
2515 |
if root.revision != rev.revision_id: |
|
1910.2.63
by Aaron Bentley
Add supports_rich_root member to repository |
2516 |
raise errors.IncompatibleRevision(repr(repository)) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2517 |
text_keys = {} |
2518 |
for path, ie in entries: |
|
2519 |
text_keys[(ie.file_id, ie.revision)] = ie |
|
2520 |
text_parent_map = repository.texts.get_parent_map(text_keys) |
|
2521 |
missing_texts = set(text_keys) - set(text_parent_map) |
|
1185.82.84
by Aaron Bentley
Moved stuff around |
2522 |
# Add the texts that are not already present
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2523 |
for text_key in missing_texts: |
2524 |
ie = text_keys[text_key] |
|
2525 |
text_parents = [] |
|
2526 |
# FIXME: TODO: The following loop overlaps/duplicates that done by
|
|
2527 |
# commit to determine parents. There is a latent/real bug here where
|
|
2528 |
# the parents inserted are not those commit would do - in particular
|
|
2529 |
# they are not filtered by heads(). RBC, AB
|
|
2530 |
for revision, tree in parent_trees.iteritems(): |
|
2531 |
if ie.file_id not in tree: |
|
2532 |
continue
|
|
2533 |
parent_id = tree.inventory[ie.file_id].revision |
|
2534 |
if parent_id in text_parents: |
|
2535 |
continue
|
|
2536 |
text_parents.append((ie.file_id, parent_id)) |
|
2537 |
lines = revision_tree.get_file(ie.file_id).readlines() |
|
2538 |
repository.texts.add_lines(text_key, text_parents, lines) |
|
1185.82.84
by Aaron Bentley
Moved stuff around |
2539 |
try: |
2540 |
# install the inventory
|
|
2541 |
repository.add_inventory(rev.revision_id, inv, present_parents) |
|
2542 |
except errors.RevisionAlreadyPresent: |
|
2543 |
pass
|
|
2996.2.1
by Aaron Bentley
Add KnitRepositoryFormat4 |
2544 |
if signature is not None: |
2996.2.8
by Aaron Bentley
Fix add_signature discrepancies |
2545 |
repository.add_signature_text(rev.revision_id, signature) |
1185.82.84
by Aaron Bentley
Moved stuff around |
2546 |
repository.add_revision(rev.revision_id, rev, inv) |
2547 |
||
2548 |
||
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2549 |
class MetaDirRepository(Repository): |
3407.2.13
by Martin Pool
Remove indirection through control_files to get transports |
2550 |
"""Repositories in the new meta-dir layout. |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2551 |
|
3407.2.13
by Martin Pool
Remove indirection through control_files to get transports |
2552 |
:ivar _transport: Transport for access to repository control files,
|
2553 |
typically pointing to .bzr/repository.
|
|
2554 |
"""
|
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2555 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2556 |
def __init__(self, _format, a_bzrdir, control_files): |
2557 |
super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files) |
|
3407.2.3
by Martin Pool
Branch and Repository use their own ._transport rather than going through .control_files |
2558 |
self._transport = control_files._transport |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2559 |
|
1596.2.12
by Robert Collins
Merge and make Knit Repository use the revision store for all possible queries. |
2560 |
def is_shared(self): |
2561 |
"""Return True if this repository is flagged as a shared repository.""" |
|
3407.2.3
by Martin Pool
Branch and Repository use their own ._transport rather than going through .control_files |
2562 |
return self._transport.has('shared-storage') |
1596.2.12
by Robert Collins
Merge and make Knit Repository use the revision store for all possible queries. |
2563 |
|
2564 |
@needs_write_lock
|
|
2565 |
def set_make_working_trees(self, new_value): |
|
2566 |
"""Set the policy flag for making working trees when creating branches. |
|
2567 |
||
2568 |
This only applies to branches that use this repository.
|
|
2569 |
||
2570 |
The default is 'True'.
|
|
2571 |
:param new_value: True to restore the default, False to disable making
|
|
2572 |
working trees.
|
|
2573 |
"""
|
|
2574 |
if new_value: |
|
2575 |
try: |
|
3407.2.3
by Martin Pool
Branch and Repository use their own ._transport rather than going through .control_files |
2576 |
self._transport.delete('no-working-trees') |
1596.2.12
by Robert Collins
Merge and make Knit Repository use the revision store for all possible queries. |
2577 |
except errors.NoSuchFile: |
2578 |
pass
|
|
2579 |
else: |
|
3407.2.5
by Martin Pool
Deprecate LockableFiles.put_utf8 |
2580 |
self._transport.put_bytes('no-working-trees', '', |
3407.2.18
by Martin Pool
BzrDir takes responsibility for default file/dir modes |
2581 |
mode=self.bzrdir._get_file_mode()) |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2582 |
|
1596.2.12
by Robert Collins
Merge and make Knit Repository use the revision store for all possible queries. |
2583 |
def make_working_trees(self): |
2584 |
"""Returns the policy for making working trees on new branches.""" |
|
3407.2.3
by Martin Pool
Branch and Repository use their own ._transport rather than going through .control_files |
2585 |
return not self._transport.has('no-working-trees') |
1596.2.12
by Robert Collins
Merge and make Knit Repository use the revision store for all possible queries. |
2586 |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2587 |
|
3316.2.3
by Robert Collins
Remove manual notification of transaction finishing on versioned files. |
2588 |
class MetaDirVersionedFileRepository(MetaDirRepository): |
2589 |
"""Repositories in a meta-dir, that work via versioned file objects.""" |
|
2590 |
||
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2591 |
def __init__(self, _format, a_bzrdir, control_files): |
3316.2.5
by Robert Collins
Review feedback. |
2592 |
super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir, |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
2593 |
control_files) |
3316.2.3
by Robert Collins
Remove manual notification of transaction finishing on versioned files. |
2594 |
|
2595 |
||
4032.3.1
by Robert Collins
Add a BranchFormat.network_name() method as preparation for creating branches via RPC calls. |
2596 |
network_format_registry = registry.FormatRegistry() |
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2597 |
"""Registry of formats indexed by their network name.
|
2598 |
||
2599 |
The network name for a repository format is an identifier that can be used when
|
|
2600 |
referring to formats with smart server operations. See
|
|
2601 |
RepositoryFormat.network_name() for more detail.
|
|
2602 |
"""
|
|
3990.5.1
by Andrew Bennetts
Add network_name() to RepositoryFormat. |
2603 |
|
2604 |
||
4032.3.1
by Robert Collins
Add a BranchFormat.network_name() method as preparation for creating branches via RPC calls. |
2605 |
format_registry = registry.FormatRegistry(network_format_registry) |
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2606 |
"""Registry of formats, indexed by their BzrDirMetaFormat format string.
|
2241.1.11
by Martin Pool
Get rid of RepositoryFormat*_instance objects. Instead the format |
2607 |
|
2608 |
This can contain either format instances themselves, or classes/factories that
|
|
2609 |
can be called to obtain one.
|
|
2610 |
"""
|
|
2241.1.2
by Martin Pool
change to using external Repository format registry |
2611 |
|
2220.2.3
by Martin Pool
Add tag: revision namespace. |
2612 |
|
2613 |
#####################################################################
|
|
2614 |
# Repository Formats
|
|
1910.2.46
by Aaron Bentley
Whitespace fix |
2615 |
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2616 |
class RepositoryFormat(object): |
2617 |
"""A repository format. |
|
2618 |
||
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2619 |
Formats provide four things:
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2620 |
* An initialization routine to construct repository data on disk.
|
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2621 |
* an optional format string which is used when the BzrDir supports
|
2622 |
versioned children.
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2623 |
* an open routine which returns a Repository instance.
|
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2624 |
* A network name for referring to the format in smart server RPC
|
2625 |
methods.
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2626 |
|
2889.1.2
by Robert Collins
Review feedback. |
2627 |
There is one and only one Format subclass for each on-disk format. But
|
2628 |
there can be one Repository subclass that is used for several different
|
|
2629 |
formats. The _format attribute on a Repository instance can be used to
|
|
2630 |
determine the disk format.
|
|
2889.1.1
by Robert Collins
* The class ``bzrlib.repofmt.knitrepo.KnitRepository3`` has been folded into |
2631 |
|
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2632 |
Formats are placed in a registry by their format string for reference
|
2633 |
during opening. These should be subclasses of RepositoryFormat for
|
|
2634 |
consistency.
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2635 |
|
2636 |
Once a format is deprecated, just deprecate the initialize and open
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2637 |
methods on the format class. Do not deprecate the object, as the
|
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2638 |
object may be created even when a repository instance hasn't been
|
2639 |
created.
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2640 |
|
2641 |
Common instance attributes:
|
|
2642 |
_matchingbzrdir - the bzrdir format that the repository format was
|
|
2643 |
originally written to work with. This can be used if manually
|
|
2644 |
constructing a bzrdir and repository, or more commonly for test suite
|
|
3128.1.3
by Vincent Ladeuil
Since we are there s/parameteris.*/parameteriz&/. |
2645 |
parameterization.
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2646 |
"""
|
2647 |
||
2949.1.2
by Robert Collins
* Fetch with pack repositories will no longer read the entire history graph. |
2648 |
# Set to True or False in derived classes. True indicates that the format
|
2649 |
# supports ghosts gracefully.
|
|
2650 |
supports_ghosts = None |
|
3221.3.1
by Robert Collins
* Repository formats have a new supported-feature attribute |
2651 |
# Can this repository be given external locations to lookup additional
|
2652 |
# data. Set to True or False in derived classes.
|
|
2653 |
supports_external_lookups = None |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
2654 |
# What order should fetch operations request streams in?
|
2655 |
# The default is unordered as that is the cheapest for an origin to
|
|
2656 |
# provide.
|
|
2657 |
_fetch_order = 'unordered' |
|
2658 |
# Does this repository format use deltas that can be fetched as-deltas ?
|
|
2659 |
# (E.g. knits, where the knit deltas can be transplanted intact.)
|
|
2660 |
# We default to False, which will ensure that enough data to get
|
|
2661 |
# a full text out of any fetch stream will be grabbed.
|
|
2662 |
_fetch_uses_deltas = False |
|
2663 |
# Should fetch trigger a reconcile after the fetch? Only needed for
|
|
2664 |
# some repository formats that can suffer internal inconsistencies.
|
|
2665 |
_fetch_reconcile = False |
|
4183.5.1
by Robert Collins
Add RepositoryFormat.fast_deltas to signal fast delta creation. |
2666 |
# Does this format have < O(tree_size) delta generation. Used to hint what
|
2667 |
# code path for commit, amongst other things.
|
|
2668 |
fast_deltas = None |
|
2949.1.2
by Robert Collins
* Fetch with pack repositories will no longer read the entire history graph. |
2669 |
|
1904.2.3
by Martin Pool
Give a warning on access to old repository formats |
2670 |
def __str__(self): |
2671 |
return "<%s>" % self.__class__.__name__ |
|
2672 |
||
2241.1.11
by Martin Pool
Get rid of RepositoryFormat*_instance objects. Instead the format |
2673 |
def __eq__(self, other): |
2674 |
# format objects are generally stateless
|
|
2675 |
return isinstance(other, self.__class__) |
|
2676 |
||
2100.3.35
by Aaron Bentley
equality operations on bzrdir |
2677 |
def __ne__(self, other): |
2100.3.31
by Aaron Bentley
Merged bzr.dev (17 tests failing) |
2678 |
return not self == other |
2679 |
||
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2680 |
@classmethod
|
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2681 |
def find_format(klass, a_bzrdir): |
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
2682 |
"""Return the format for the repository object in a_bzrdir. |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2683 |
|
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
2684 |
This is used by bzr native formats that have a "format" file in
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2685 |
the repository. Other methods may be used by different types of
|
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
2686 |
control directory.
|
2687 |
"""
|
|
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2688 |
try: |
2689 |
transport = a_bzrdir.get_repository_transport(None) |
|
2690 |
format_string = transport.get("format").read() |
|
2241.1.2
by Martin Pool
change to using external Repository format registry |
2691 |
return format_registry.get(format_string) |
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2692 |
except errors.NoSuchFile: |
2693 |
raise errors.NoRepositoryPresent(a_bzrdir) |
|
2694 |
except KeyError: |
|
3246.3.2
by Daniel Watkins
Modified uses of errors.UnknownFormatError. |
2695 |
raise errors.UnknownFormatError(format=format_string, |
2696 |
kind='repository') |
|
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2697 |
|
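A hedged sketch of format discovery via the registry lookup above (the location '.' is an assumption made for illustration):

    from bzrlib import bzrdir, errors
    from bzrlib.repository import RepositoryFormat

    a_bzrdir = bzrdir.BzrDir.open('.')
    try:
        repo_format = RepositoryFormat.find_format(a_bzrdir)
    except errors.NoRepositoryPresent:
        repo_format = None   # the control dir has no repository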
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
2698 |
@classmethod
|
2241.1.2
by Martin Pool
change to using external Repository format registry |
2699 |
def register_format(klass, format): |
2700 |
format_registry.register(format.get_format_string(), format) |
|
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
2701 |
|
2702 |
@classmethod
|
|
2703 |
def unregister_format(klass, format): |
|
2241.1.2
by Martin Pool
change to using external Repository format registry |
2704 |
format_registry.remove(format.get_format_string()) |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2705 |
|
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2706 |
@classmethod
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2707 |
def get_default_format(klass): |
2708 |
"""Return the current default format.""" |
|
2204.5.3
by Aaron Bentley
zap old repository default handling |
2709 |
from bzrlib import bzrdir |
2710 |
return bzrdir.format_registry.make_bzrdir('default').repository_format |
|
2241.1.1
by Martin Pool
Change RepositoryFormat to use a Registry rather than ad-hoc dictionary |
2711 |
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2712 |
def get_format_string(self): |
2713 |
"""Return the ASCII format string that identifies this format. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2714 |
|
2715 |
Note that in pre format ?? repositories the format string is
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2716 |
not permitted nor written to disk.
|
2717 |
"""
|
|
2718 |
raise NotImplementedError(self.get_format_string) |
|
2719 |
||
1624.3.19
by Olaf Conradi
New call get_format_description to give a user-friendly description of a |
2720 |
def get_format_description(self): |
1759.2.1
by Jelmer Vernooij
Fix some types (found using aspell). |
2721 |
"""Return the short description for this format.""" |
1624.3.19
by Olaf Conradi
New call get_format_description to give a user-friendly description of a |
2722 |
raise NotImplementedError(self.get_format_description) |
2723 |
||
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
2724 |
# TODO: this shouldn't be in the base class, it's specific to things that
|
2725 |
# use weaves or knits -- mbp 20070207
|
|
1563.2.17
by Robert Collins
Change knits repositories to use a knit versioned file store for file texts. |
2726 |
def _get_versioned_file_store(self, |
2727 |
name, |
|
2728 |
transport, |
|
2729 |
control_files, |
|
2730 |
prefixed=True, |
|
2241.1.10
by Martin Pool
Remove more references to weaves from the repository.py file |
2731 |
versionedfile_class=None, |
1946.2.5
by John Arbash Meinel
Make knit stores delay creation, but not control stores |
2732 |
versionedfile_kwargs={}, |
1608.2.12
by Martin Pool
Store-escaping must quote uppercase characters too, so that they're safely |
2733 |
escaped=False): |
2241.1.10
by Martin Pool
Remove more references to weaves from the repository.py file |
2734 |
if versionedfile_class is None: |
2735 |
versionedfile_class = self._versionedfile_class |
|
1563.2.17
by Robert Collins
Change knits repositories to use a knit versioned file store for file texts. |
2736 |
weave_transport = control_files._transport.clone(name) |
2737 |
dir_mode = control_files._dir_mode |
|
2738 |
file_mode = control_files._file_mode |
|
2739 |
return VersionedFileStore(weave_transport, prefixed=prefixed, |
|
1608.2.12
by Martin Pool
Store-escaping must quote uppercase characters too, so that they're safely |
2740 |
dir_mode=dir_mode, |
2741 |
file_mode=file_mode, |
|
2742 |
versionedfile_class=versionedfile_class, |
|
1946.2.5
by John Arbash Meinel
Make knit stores delay creation, but not control stores |
2743 |
versionedfile_kwargs=versionedfile_kwargs, |
1608.2.12
by Martin Pool
Store-escaping must quote uppercase characters too, so that they're safely |
2744 |
escaped=escaped) |
1563.2.17
by Robert Collins
Change knits repositories to use a knit versioned file store for file texts. |
2745 |
|
1534.6.1
by Robert Collins
allow API creation of shared repositories |
2746 |
def initialize(self, a_bzrdir, shared=False): |
2747 |
"""Initialize a repository of this format in a_bzrdir. |
|
2748 |
||
2749 |
:param a_bzrdir: The bzrdir to put the new repository in.
|
|
2750 |
:param shared: The repository should be initialized as a sharable one.
|
|
1752.2.52
by Andrew Bennetts
Flesh out more Remote* methods needed to open and initialise remote branches/trees/repositories. |
2751 |
:returns: The new repository object.
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2752 |
|
1534.6.1
by Robert Collins
allow API creation of shared repositories |
2753 |
This may raise UninitializableFormat if shared repositories are not
|
2754 |
compatible with the a_bzrdir.
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2755 |
"""
|
1752.2.52
by Andrew Bennetts
Flesh out more Remote* methods needed to open and initialise remote branches/trees/repositories. |
2756 |
raise NotImplementedError(self.initialize) |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2757 |
|
2758 |
def is_supported(self): |
|
2759 |
"""Is this format supported? |
|
2760 |
||
2761 |
Supported formats must be initializable and openable.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2762 |
Unsupported formats may not support initialization or committing or
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2763 |
some other features depending on the reason for not being supported.
|
2764 |
"""
|
|
2765 |
return True |
|
2766 |
||
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2767 |
def network_name(self): |
2768 |
"""A simple byte string uniquely identifying this format for RPC calls. |
|
2769 |
||
2770 |
MetaDir repository formats use their disk format string to identify the
|
|
2771 |
repository over the wire. All in one formats such as bzr < 0.8, and
|
|
2772 |
foreign formats like svn/git and hg should use some marker which is
|
|
2773 |
unique and immutable.
|
|
2774 |
"""
|
|
2775 |
raise NotImplementedError(self.network_name) |
|
2776 |
||
1910.2.12
by Aaron Bentley
Implement knit repo format 2 |
2777 |
def check_conversion_target(self, target_format): |
2778 |
raise NotImplementedError(self.check_conversion_target) |
|
2779 |
||
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2780 |
def open(self, a_bzrdir, _found=False): |
2781 |
"""Return an instance of this format for the bzrdir a_bzrdir. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2782 |
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2783 |
_found is a private parameter, do not use it.
|
2784 |
"""
|
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2785 |
raise NotImplementedError(self.open) |
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2786 |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2787 |
|
2788 |
class MetaDirRepositoryFormat(RepositoryFormat): |
|
1759.2.1
by Jelmer Vernooij
Fix some types (found using aspell). |
2789 |
"""Common base class for the new repositories using the metadir layout.""" |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2790 |
|
1910.2.14
by Aaron Bentley
Fail when trying to use interrepository on Knit2 and Knit1 |
2791 |
rich_root_data = False |
2323.5.17
by Martin Pool
Add supports_tree_reference to all repo formats (robert) |
2792 |
supports_tree_reference = False |
3221.3.1
by Robert Collins
* Repository formats have a new supported-feature attribute |
2793 |
supports_external_lookups = False |
3845.1.1
by John Arbash Meinel
Ensure that RepositoryFormat._matchingbzrdir.repository_format matches. |
2794 |
|
2795 |
@property
|
|
2796 |
def _matchingbzrdir(self): |
|
2797 |
matching = bzrdir.BzrDirMetaFormat1() |
|
2798 |
matching.repository_format = self |
|
2799 |
return matching |
|
1910.2.14
by Aaron Bentley
Fail when trying to use interrepository on Knit2 and Knit1 |
2800 |
|
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
2801 |
def __init__(self): |
2802 |
super(MetaDirRepositoryFormat, self).__init__() |
|
2803 |
||
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2804 |
def _create_control_files(self, a_bzrdir): |
2805 |
"""Create the required files and the initial control_files object.""" |
|
1759.2.2
by Jelmer Vernooij
Revert some of my spelling fixes and fix some typos after review by Aaron. |
2806 |
# FIXME: RBC 20060125 don't peek under the covers
|
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2807 |
# NB: no need to escape relative paths that are url safe.
|
2808 |
repository_transport = a_bzrdir.get_repository_transport(self) |
|
1996.3.4
by John Arbash Meinel
lazy_import bzrlib/repository.py |
2809 |
control_files = lockable_files.LockableFiles(repository_transport, |
2810 |
'lock', lockdir.LockDir) |
|
1553.5.61
by Martin Pool
Locks protecting LockableFiles must now be explicitly created before use. |
2811 |
control_files.create_lock() |
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2812 |
return control_files |
2813 |
||
2814 |
def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared): |
|
2815 |
"""Upload the initial blank content.""" |
|
2816 |
control_files = self._create_control_files(a_bzrdir) |
|
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2817 |
control_files.lock_write() |
3407.2.4
by Martin Pool
Small cleanups to initial creation of repository files |
2818 |
transport = control_files._transport |
2819 |
if shared == True: |
|
2820 |
utf8_files += [('shared-storage', '')] |
|
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2821 |
try: |
3407.2.18
by Martin Pool
BzrDir takes responsibility for default file/dir modes |
2822 |
transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode()) |
3407.2.4
by Martin Pool
Small cleanups to initial creation of repository files |
2823 |
for (filename, content_stream) in files: |
2824 |
transport.put_file(filename, content_stream, |
|
3407.2.18
by Martin Pool
BzrDir takes responsibility for default file/dir modes |
2825 |
mode=a_bzrdir._get_file_mode()) |
3407.2.4
by Martin Pool
Small cleanups to initial creation of repository files |
2826 |
for (filename, content_bytes) in utf8_files: |
2827 |
transport.put_bytes_non_atomic(filename, content_bytes, |
|
3407.2.18
by Martin Pool
BzrDir takes responsibility for default file/dir modes |
2828 |
mode=a_bzrdir._get_file_mode()) |
1534.4.47
by Robert Collins
Split out repository into .bzr/repository |
2829 |
finally: |
2830 |
control_files.unlock() |
|
1556.1.3
by Robert Collins
Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids |
2831 |
|
3990.5.1
by Andrew Bennetts
Add network_name() to RepositoryFormat. |
2832 |
def network_name(self): |
2833 |
"""Metadir formats have matching disk and network format strings.""" |
|
2834 |
return self.get_format_string() |
|
2835 |
||
2836 |
||
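A short check of the property stated above, using one of the pack formats registered later in this module (instantiating the format class directly is an assumption made for illustration):

    from bzrlib.repofmt.pack_repo import RepositoryFormatKnitPack1

    fmt = RepositoryFormatKnitPack1()
    # For metadir formats the wire identifier and the disk marker coincide.
    assert fmt.network_name() == fmt.get_format_string()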
3990.5.3
by Robert Collins
Docs and polish on RepositoryFormat.network_name. |
2837 |
# Pre-0.8 formats that don't have a disk format string (because they are
|
2838 |
# versioned by the matching control directory). We use the control directory's
|
|
2839 |
# disk format string as a key for the network_name because they meet the
|
|
2840 |
# constraints (simple string, unique, immutable).
|
|
3990.5.1
by Andrew Bennetts
Add network_name() to RepositoryFormat. |
2841 |
network_format_registry.register_lazy( |
2842 |
"Bazaar-NG branch, format 5\n", |
|
2843 |
'bzrlib.repofmt.weaverepo', |
|
2844 |
'RepositoryFormat5', |
|
2845 |
)
|
|
2846 |
network_format_registry.register_lazy( |
|
2847 |
"Bazaar-NG branch, format 6\n", |
|
2848 |
'bzrlib.repofmt.weaverepo', |
|
2849 |
'RepositoryFormat6', |
|
2850 |
)
|
|
2851 |
||
2852 |
# formats which have no format string are not discoverable or independently
|
|
4032.1.1
by John Arbash Meinel
Merge the removal of all trailing whitespace, and resolve conflicts. |
2853 |
# creatable on disk, so are not registered in format_registry. They're
|
2241.1.11
by Martin Pool
Get rid of RepositoryFormat*_instance objects. Instead the format |
2854 |
# all in bzrlib.repofmt.weaverepo now. When an instance of one of these is
|
2855 |
# needed, it's constructed directly by the BzrDir. Non-native formats where
|
|
2856 |
# the repository is not separately opened are similar.
|
|
2857 |
||
2241.1.4
by Martin Pool
Moved old weave-based repository formats into bzrlib.repofmt.weaverepo. |
2858 |
format_registry.register_lazy( |
2859 |
'Bazaar-NG Repository format 7', |
|
2860 |
'bzrlib.repofmt.weaverepo', |
|
2241.1.11
by Martin Pool
Get rid of RepositoryFormat*_instance objects. Instead the format |
2861 |
'RepositoryFormat7'
|
2241.1.4
by Martin Pool
Moved old weave-based repository formats into bzrlib.repofmt.weaverepo. |
2862 |
)
|
2592.3.22
by Robert Collins
Add new experimental repository formats. |
2863 |
|
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
2864 |
format_registry.register_lazy( |
2865 |
'Bazaar-NG Knit Repository Format 1', |
|
2866 |
'bzrlib.repofmt.knitrepo', |
|
2241.1.11
by Martin Pool
Get rid of RepositoryFormat*_instance objects. Instead the format |
2867 |
'RepositoryFormatKnit1', |
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
2868 |
)
|
2869 |
||
2241.1.5
by Martin Pool
Move KnitFormat2 into repofmt |
2870 |
format_registry.register_lazy( |
2255.2.230
by Robert Collins
Update tree format signatures to mention introducing bzr version. |
2871 |
'Bazaar Knit Repository Format 3 (bzr 0.15)\n', |
2100.3.31
by Aaron Bentley
Merged bzr.dev (17 tests failing) |
2872 |
'bzrlib.repofmt.knitrepo', |
2873 |
'RepositoryFormatKnit3', |
|
2874 |
)
|
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2875 |
|
2996.2.1
by Aaron Bentley
Add KnitRepositoryFormat4 |
2876 |
format_registry.register_lazy( |
2877 |
'Bazaar Knit Repository Format 4 (bzr 1.0)\n', |
|
2878 |
'bzrlib.repofmt.knitrepo', |
|
2879 |
'RepositoryFormatKnit4', |
|
2880 |
)
|
|
2881 |
||
2939.2.1
by Ian Clatworthy
use 'knitpack' naming instead of 'experimental' for pack formats |
2882 |
# Pack-based formats. There is one format without subtree support, and one
|
2883 |
# with subtree support, to allow ease of testing.
|
|
3152.2.1
by Robert Collins
* A new repository format 'development' has been added. This format will |
2884 |
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
|
2592.3.22
by Robert Collins
Add new experimental repository formats. |
2885 |
format_registry.register_lazy( |
2939.2.6
by Ian Clatworthy
more review feedback from lifeless and poolie |
2886 |
'Bazaar pack repository format 1 (needs bzr 0.92)\n', |
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
2887 |
'bzrlib.repofmt.pack_repo', |
2592.3.224
by Martin Pool
Rename GraphKnitRepository etc to KnitPackRepository |
2888 |
'RepositoryFormatKnitPack1', |
2592.3.22
by Robert Collins
Add new experimental repository formats. |
2889 |
)
|
2890 |
format_registry.register_lazy( |
|
2939.2.6
by Ian Clatworthy
more review feedback from lifeless and poolie |
2891 |
'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n', |
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
2892 |
'bzrlib.repofmt.pack_repo', |
2592.3.224
by Martin Pool
Rename GraphKnitRepository etc to KnitPackRepository |
2893 |
'RepositoryFormatKnitPack3', |
2592.3.22
by Robert Collins
Add new experimental repository formats. |
2894 |
)
|
2996.2.11
by Aaron Bentley
Implement rich-root-pack format ( #164639) |
2895 |
format_registry.register_lazy( |
2896 |
'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n', |
|
2897 |
'bzrlib.repofmt.pack_repo', |
|
2898 |
'RepositoryFormatKnitPack4', |
|
2899 |
)
|
|
3549.1.5
by Martin Pool
Add stable format names for stacked branches |
2900 |
format_registry.register_lazy( |
2901 |
'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n', |
|
2902 |
'bzrlib.repofmt.pack_repo', |
|
2903 |
'RepositoryFormatKnitPack5', |
|
2904 |
)
|
|
2905 |
format_registry.register_lazy( |
|
3606.10.1
by John Arbash Meinel
Create a new --1.6-rich-root, deprecate the old one. |
2906 |
'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n', |
2907 |
'bzrlib.repofmt.pack_repo', |
|
2908 |
'RepositoryFormatKnitPack5RichRoot', |
|
2909 |
)
|
|
2910 |
format_registry.register_lazy( |
|
3549.1.6
by Martin Pool
Change stacked-subtree to stacked-rich-root |
2911 |
'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n', |
3549.1.5
by Martin Pool
Add stable format names for stacked branches |
2912 |
'bzrlib.repofmt.pack_repo', |
3606.10.1
by John Arbash Meinel
Create a new --1.6-rich-root, deprecate the old one. |
2913 |
'RepositoryFormatKnitPack5RichRootBroken', |
3549.1.5
by Martin Pool
Add stable format names for stacked branches |
2914 |
)
|
3805.3.1
by John Arbash Meinel
Add repository 1.9 format, and update the documentation. |
2915 |
format_registry.register_lazy( |
2916 |
'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n', |
|
2917 |
'bzrlib.repofmt.pack_repo', |
|
2918 |
'RepositoryFormatKnitPack6', |
|
2919 |
)
|
|
2920 |
format_registry.register_lazy( |
|
2921 |
'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n', |
|
2922 |
'bzrlib.repofmt.pack_repo', |
|
2923 |
'RepositoryFormatKnitPack6RichRoot', |
|
2924 |
)
|
|
3549.1.5
by Martin Pool
Add stable format names for stacked branches |
2925 |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2926 |
# Development formats.
|
3735.1.1
by Robert Collins
Add development2 formats using BTree indices. |
2927 |
# 1.7->1.8 go below here
|
2928 |
format_registry.register_lazy( |
|
2929 |
"Bazaar development format 2 (needs bzr.dev from before 1.8)\n", |
|
2930 |
'bzrlib.repofmt.pack_repo', |
|
2931 |
'RepositoryFormatPackDevelopment2', |
|
2932 |
)
|
|
2933 |
format_registry.register_lazy( |
|
2934 |
("Bazaar development format 2 with subtree support " |
|
2935 |
"(needs bzr.dev from before 1.8)\n"), |
|
2936 |
'bzrlib.repofmt.pack_repo', |
|
2937 |
'RepositoryFormatPackDevelopment2Subtree', |
|
2938 |
)
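For completeness, this is roughly how an out-of-tree format would hook itself into format_registry, following the same three-argument register_lazy calls used throughout this block. The module and class names are hypothetical, and the snippet assumes bzrlib itself is importable; registering a lazy entry does not import the named module.

    # Hedged sketch: plugin-style registration of a hypothetical format.
    # 'example_plugin.repofmt' and 'RepositoryFormatExample1' do not exist;
    # they stand in for a plugin's own module and format class.
    from bzrlib import repository

    repository.format_registry.register_lazy(
        'Example repository format 1 (demo only)\n',   # on-disk format string
        'example_plugin.repofmt',                      # imported only on use
        'RepositoryFormatExample1',
        )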
|
|
2592.3.22
by Robert Collins
Add new experimental repository formats. |
2939 |
|
1534.4.40
by Robert Collins
Add RepositoryFormats and allow bzrdir.open or create _repository to be used. |
2940 |
|
1563.2.12
by Robert Collins
Checkpointing: created InterObject to factor out common inter object worker code, added InterVersionedFile and tests to allow making join work between any versionedfile. |
2941 |
class InterRepository(InterObject): |
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
2942 |
"""This class represents operations taking place between two repositories. |
2943 |
||
1534.1.33
by Robert Collins
Move copy_content_into into InterRepository and InterWeaveRepo, and disable the default codepath test as we have optimised paths for all current combinations. |
2944 |
Its instances have methods like copy_content and fetch, and contain
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
2945 |
references to the source and target repositories these operations can be
|
1534.1.27
by Robert Collins
Start InterRepository with InterRepository.get. |
2946 |
carried out on.
|
2947 |
||
2948 |
Often we will provide convenience methods on 'repository' which carry out
|
|
2949 |
operations with another repository - they will always forward to
|
|
2950 |
InterRepository.get(other).method_name(parameters).
|
|
2951 |
"""
|
|
2952 |
||
4144.2.1
by Andrew Bennetts
Always batch revisions to ask of target when doing _walk_to_common_revisions, rather than special-casing in Inter*Remote*. |
2953 |
_walk_to_common_revisions_batch_size = 50 |
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
2954 |
_optimisers = [] |
1534.1.28
by Robert Collins
Allow for optimised InterRepository selection. |
2955 |
"""The available optimised InterRepository types.""" |
2956 |
||
4060.1.3
by Robert Collins
Implement the separate source component for fetch - repository.StreamSource. |
2957 |
@needs_write_lock
|
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
2958 |
def copy_content(self, revision_id=None): |
4060.1.3
by Robert Collins
Implement the separate source component for fetch - repository.StreamSource. |
2959 |
"""Make a complete copy of the content in self into destination. |
2960 |
||
2961 |
This is a destructive operation! Do not use it on existing
|
|
2962 |
repositories.
|
|
2963 |
||
2964 |
:param revision_id: Only copy the content needed to construct
|
|
2965 |
revision_id and its parents.
|
|
2966 |
"""
|
|
2967 |
try: |
|
2968 |
self.target.set_make_working_trees(self.source.make_working_trees()) |
|
2969 |
except NotImplementedError: |
|
2970 |
pass
|
|
2971 |
self.target.fetch(self.source, revision_id=revision_id) |
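A hedged usage sketch of the forwarding pattern the class docstring describes, assuming the two-argument InterRepository.get(source, target) classmethod from bzrlib.inter. repo_a and repo_b are placeholders for two already-opened Repository objects and are not defined here.

    # repo_a and repo_b are hypothetical, already-opened Repository objects.
    from bzrlib.repository import InterRepository

    inter = InterRepository.get(repo_a, repo_b)   # picks the best optimiser
    inter.copy_content()   # destructive: meant for filling a new, empty repo_b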
|
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
2972 |
|
4110.2.23
by Martin Pool
blackbox hpss test should check repository was remotely locked |
2973 |
@needs_write_lock
|
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
2974 |
def fetch(self, revision_id=None, pb=None, find_ghosts=False, |
2975 |
fetch_spec=None): |
|
1534.1.31
by Robert Collins
Deprecated fetch.fetch and fetch.greedy_fetch for branch.fetch, and move the Repository.fetch internals to InterRepo and InterWeaveRepo. |
2976 |
"""Fetch the content required to construct revision_id. |
2977 |
||
1910.7.17
by Andrew Bennetts
Various cosmetic changes. |
2978 |
The content is copied from self.source to self.target.
|
1534.1.31
by Robert Collins
Deprecated fetch.fetch and fetch.greedy_fetch for branch.fetch, and move the Repository.fetch internals to InterRepo and InterWeaveRepo. |
2979 |
|
2980 |
:param revision_id: if None all content is copied, if NULL_REVISION no
|
|
2981 |
content is copied.
|
|
2982 |
:param pb: optional progress bar to use for progress reports. If not
|
|
2983 |
provided a default one will be created.
|
|
4065.1.1
by Robert Collins
Change the return value of fetch() to None. |
2984 |
:return: None.
|
1534.1.31
by Robert Collins
Deprecated fetch.fetch and fetch.greedy_fetch for branch.fetch, and move the Repository.fetch internals to InterRepo and InterWeaveRepo. |
2985 |
"""
|
4060.1.3
by Robert Collins
Implement the separate source component for fetch - repository.StreamSource. |
2986 |
from bzrlib.fetch import RepoFetcher |
2987 |
f = RepoFetcher(to_repository=self.target, |
|
2988 |
from_repository=self.source, |
|
2989 |
last_revision=revision_id, |
|
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
2990 |
fetch_spec=fetch_spec, |
4060.1.3
by Robert Collins
Implement the separate source component for fetch - repository.StreamSource. |
2991 |
pb=pb, find_ghosts=find_ghosts) |
3172.4.4
by Robert Collins
Review feedback. |
2992 |
|
2993 |
def _walk_to_common_revisions(self, revision_ids): |
|
2994 |
"""Walk out from revision_ids in source to revisions target has. |
|
2995 |
||
2996 |
:param revision_ids: The start point for the search.
|
|
2997 |
:return: A set of revision ids.
|
|
2998 |
"""
|
|
4144.3.12
by Andrew Bennetts
Remove target_get_graph and target_get_parent_map attributes from InterRepository; nothing overrides them anymore. |
2999 |
target_graph = self.target.get_graph() |
1551.19.41
by Aaron Bentley
Accelerate no-op pull |
3000 |
revision_ids = frozenset(revision_ids) |
3452.2.6
by Andrew Bennetts
Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to |
3001 |
# Fast path for the case where all the revisions are already in the
|
3002 |
# target repo.
|
|
3003 |
# (Although this does incur an extra round trip for the
|
|
3004 |
# fairly common case where the target doesn't already have the revision
|
|
3005 |
# we're pushing.)
|
|
1551.19.41
by Aaron Bentley
Accelerate no-op pull |
3006 |
if set(target_graph.get_parent_map(revision_ids)) == revision_ids: |
3007 |
return graph.SearchResult(revision_ids, set(), 0, set()) |
|
3172.4.4
by Robert Collins
Review feedback. |
3008 |
missing_revs = set() |
1551.19.41
by Aaron Bentley
Accelerate no-op pull |
3009 |
source_graph = self.source.get_graph() |
3172.4.4
by Robert Collins
Review feedback. |
3010 |
# ensure we don't pay silly lookup costs.
|
1551.19.41
by Aaron Bentley
Accelerate no-op pull |
3011 |
searcher = source_graph._make_breadth_first_searcher(revision_ids) |
3172.4.4
by Robert Collins
Review feedback. |
3012 |
null_set = frozenset([_mod_revision.NULL_REVISION]) |
3731.4.2
by Andrew Bennetts
Move ghost check out of the inner loop. |
3013 |
searcher_exhausted = False |
3172.4.4
by Robert Collins
Review feedback. |
3014 |
while True: |
3452.2.6
by Andrew Bennetts
Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to |
3015 |
next_revs = set() |
3731.4.2
by Andrew Bennetts
Move ghost check out of the inner loop. |
3016 |
ghosts = set() |
3017 |
# Iterate the searcher until we have enough next_revs
|
|
3452.2.6
by Andrew Bennetts
Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to |
3018 |
while len(next_revs) < self._walk_to_common_revisions_batch_size: |
3019 |
try: |
|
3731.4.2
by Andrew Bennetts
Move ghost check out of the inner loop. |
3020 |
next_revs_part, ghosts_part = searcher.next_with_ghosts() |
3452.2.6
by Andrew Bennetts
Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to |
3021 |
next_revs.update(next_revs_part) |
3731.4.2
by Andrew Bennetts
Move ghost check out of the inner loop. |
3022 |
ghosts.update(ghosts_part) |
3452.2.6
by Andrew Bennetts
Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to |
3023 |
except StopIteration: |
3731.4.2
by Andrew Bennetts
Move ghost check out of the inner loop. |
3024 |
searcher_exhausted = True |
3452.2.6
by Andrew Bennetts
Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to |
3025 |
break
|
3731.4.3
by Andrew Bennetts
Rework ghost checking in _walk_to_common_revisions. |
3026 |
# If there are ghosts in the source graph, and the caller asked for
|
3027 |
# them, make sure that they are present in the target.
|
|
3731.4.5
by Andrew Bennetts
Clarify the code slightly. |
3028 |
# We don't care about other ghosts as we can't fetch them and
|
3029 |
# haven't been asked to.
|
|
3030 |
ghosts_to_check = set(revision_ids.intersection(ghosts)) |
|
3031 |
revs_to_get = set(next_revs).union(ghosts_to_check) |
|
3032 |
if revs_to_get: |
|
3033 |
have_revs = set(target_graph.get_parent_map(revs_to_get)) |
|
3731.4.2
by Andrew Bennetts
Move ghost check out of the inner loop. |
3034 |
# we always have NULL_REVISION present.
|
3731.4.5
by Andrew Bennetts
Clarify the code slightly. |
3035 |
have_revs = have_revs.union(null_set) |
3036 |
# Check if the target is missing any ghosts we need.
|
|
3731.4.3
by Andrew Bennetts
Rework ghost checking in _walk_to_common_revisions. |
3037 |
ghosts_to_check.difference_update(have_revs) |
3038 |
if ghosts_to_check: |
|
3039 |
# One of the caller's revision_ids is a ghost in both the
|
|
3040 |
# source and the target.
|
|
3041 |
raise errors.NoSuchRevision( |
|
3042 |
self.source, ghosts_to_check.pop()) |
|
3731.4.2
by Andrew Bennetts
Move ghost check out of the inner loop. |
3043 |
missing_revs.update(next_revs - have_revs) |
3808.1.4
by John Arbash Meinel
make _walk_to_common responsible for stopping ancestors |
3044 |
# Because we may have walked past the original stop point, make
|
3045 |
# sure everything is stopped
|
|
3046 |
stop_revs = searcher.find_seen_ancestors(have_revs) |
|
3047 |
searcher.stop_searching_any(stop_revs) |
|
3731.4.2
by Andrew Bennetts
Move ghost check out of the inner loop. |
3048 |
if searcher_exhausted: |
3172.4.4
by Robert Collins
Review feedback. |
3049 |
break
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3050 |
return searcher.get_result() |
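The batching above trades a little extra walking for far fewer round trips: each get_parent_map call to the target covers up to _walk_to_common_revisions_batch_size revisions. A self-contained toy version of the same idea (no bzrlib types, no ghost handling):

    # Toy sketch of the batching idea: walk the source ancestry breadth-first,
    # but ask the target about up to BATCH_SIZE revisions per (simulated)
    # round trip instead of one at a time.
    BATCH_SIZE = 3

    source_parents = {                       # child -> parents (toy ancestry)
        'tip': ['m1', 'm2', 'm3'],
        'm1': ['base'], 'm2': ['base'], 'm3': ['base'],
        'base': [],
    }
    target_has = {'base'}                    # revisions the target already has

    missing, seen = set(), set()
    frontier, round_trips = ['tip'], 0
    while frontier:
        batch, frontier = frontier[:BATCH_SIZE], frontier[BATCH_SIZE:]
        round_trips += 1                     # one target query per batch
        for rev in batch:
            if rev in target_has:
                continue                     # common revision: stop walking here
            missing.add(rev)
            for parent in source_parents[rev]:
                if parent not in seen:
                    seen.add(parent)
                    frontier.append(parent)

    print(sorted(missing))                   # ['m1', 'm2', 'm3', 'tip']
    print(round_trips)                       # 3 queries instead of 5 single lookups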
3808.1.4
by John Arbash Meinel
make _walk_to_common responsible for stopping ancestors |
3051 |
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3052 |
@needs_read_lock
|
3053 |
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True): |
|
3054 |
"""Return the revision ids that source has that target does not. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3055 |
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3056 |
:param revision_id: only return revision ids included by this
|
3057 |
revision_id.
|
|
3058 |
:param find_ghosts: If True find missing revisions in deep history
|
|
3059 |
rather than just finding the surface difference.
|
|
3060 |
:return: A bzrlib.graph.SearchResult.
|
|
3061 |
"""
|
|
3172.4.1
by Robert Collins
* Fetching via bzr+ssh will no longer fill ghosts by default (this is |
3062 |
# stop searching at found target revisions.
|
3063 |
if not find_ghosts and revision_id is not None: |
|
3172.4.4
by Robert Collins
Review feedback. |
3064 |
return self._walk_to_common_revisions([revision_id]) |
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3065 |
# generic, possibly worst case, slow code path.
|
3066 |
target_ids = set(self.target.all_revision_ids()) |
|
3067 |
if revision_id is not None: |
|
3068 |
source_ids = self.source.get_ancestry(revision_id) |
|
3376.2.4
by Martin Pool
Remove every assert statement from bzrlib! |
3069 |
if source_ids[0] is not None: |
3070 |
raise AssertionError() |
|
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3071 |
source_ids.pop(0) |
3072 |
else: |
|
3073 |
source_ids = self.source.all_revision_ids() |
|
3074 |
result_set = set(source_ids).difference(target_ids) |
|
3184.1.9
by Robert Collins
* ``Repository.get_data_stream`` is now deprecated in favour of |
3075 |
return self.source.revision_ids_to_search_result(result_set) |
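The generic path reduces to plain set arithmetic over revision ids, as the following tiny example shows:

    # What the slow path computes, stripped to its essentials.
    source_ids = set(['r1', 'r2', 'r3', 'r4'])
    target_ids = set(['r1', 'r2'])
    missing = source_ids - target_ids
    print(sorted(missing))   # ['r3', 'r4'] -- what the target still lacks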
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3076 |
|
2592.3.28
by Robert Collins
Make InterKnitOptimiser be used between any same-model knit repository. |
3077 |
@staticmethod
|
3078 |
def _same_model(source, target): |
|
3582.1.2
by Martin Pool
Default InterRepository.fetch raises IncompatibleRepositories |
3079 |
"""True if source and target have the same data representation. |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3080 |
|
3582.1.2
by Martin Pool
Default InterRepository.fetch raises IncompatibleRepositories |
3081 |
Note: this is always called on the base class; overriding it in a
|
3082 |
subclass will have no effect.
|
|
3083 |
"""
|
|
3084 |
try: |
|
3085 |
InterRepository._assert_same_model(source, target) |
|
3086 |
return True |
|
3087 |
except errors.IncompatibleRepositories, e: |
|
3088 |
return False |
|
3089 |
||
3090 |
@staticmethod
|
|
3091 |
def _assert_same_model(source, target): |
|
3092 |
"""Raise an exception if two repositories do not use the same model. |
|
3093 |
"""
|
|
2592.3.28
by Robert Collins
Make InterKnitOptimiser be used between any same-model knit repository. |
3094 |
if source.supports_rich_root() != target.supports_rich_root(): |
3582.1.2
by Martin Pool
Default InterRepository.fetch raises IncompatibleRepositories |
3095 |
raise errors.IncompatibleRepositories(source, target, |
3096 |
"different rich-root support") |
|
2592.3.28
by Robert Collins
Make InterKnitOptimiser be used between any same-model knit repository. |
3097 |
if source._serializer != target._serializer: |
3582.1.2
by Martin Pool
Default InterRepository.fetch raises IncompatibleRepositories |
3098 |
raise errors.IncompatibleRepositories(source, target, |
3099 |
"different serializers") |
|
2592.3.28
by Robert Collins
Make InterKnitOptimiser be used between any same-model knit repository. |
3100 |
|
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3101 |
|
3102 |
class InterSameDataRepository(InterRepository): |
|
3103 |
"""Code for converting between repositories that represent the same data. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3104 |
|
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3105 |
Data format and model must match for this to work.
|
3106 |
"""
|
|
3107 |
||
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
3108 |
@classmethod
|
2241.1.7
by Martin Pool
rename method |
3109 |
def _get_repo_format_to_test(self): |
2814.1.1
by Robert Collins
* Pushing, pulling and branching branches with subtree references was not |
3110 |
"""Repository format for testing with. |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3111 |
|
2814.1.1
by Robert Collins
* Pushing, pulling and branching branches with subtree references was not |
3112 |
InterSameData can pull from subtree to subtree and from non-subtree to
|
3113 |
non-subtree, so we test this with the richest repository format.
|
|
3114 |
"""
|
|
3115 |
from bzrlib.repofmt import knitrepo |
|
3116 |
return knitrepo.RepositoryFormatKnit3() |
|
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3117 |
|
1910.2.14
by Aaron Bentley
Fail when trying to use interrepository on Knit2 and Knit1 |
3118 |
@staticmethod
|
3119 |
def is_compatible(source, target): |
|
2592.3.28
by Robert Collins
Make InterKnitOptimiser be used between any same-model knit repository. |
3120 |
return InterRepository._same_model(source, target) |
1910.2.14
by Aaron Bentley
Fail when trying to use interrepository on Knit2 and Knit1 |
3121 |
|
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3122 |
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3123 |
class InterWeaveRepo(InterSameDataRepository): |
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
3124 |
"""Optimised code paths between Weave based repositories. |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3125 |
|
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
3126 |
This should be in bzrlib/repofmt/weaverepo.py but we have not yet
|
3127 |
implemented lazy inter-object optimisation.
|
|
3128 |
"""
|
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3129 |
|
2241.1.13
by Martin Pool
Re-register InterWeaveRepo, fix test integration, add test for it |
3130 |
@classmethod
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3131 |
def _get_repo_format_to_test(self): |
3132 |
from bzrlib.repofmt import weaverepo |
|
3133 |
return weaverepo.RepositoryFormat7() |
|
3134 |
||
3135 |
@staticmethod
|
|
3136 |
def is_compatible(source, target): |
|
3137 |
"""Be compatible with known Weave formats. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3138 |
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3139 |
We don't test for the stores being of specific types because that
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3140 |
could lead to confusing results, and there is no need to be
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3141 |
overly general.
|
3142 |
"""
|
|
3143 |
from bzrlib.repofmt.weaverepo import ( |
|
3144 |
RepositoryFormat5, |
|
3145 |
RepositoryFormat6, |
|
3146 |
RepositoryFormat7, |
|
3147 |
)
|
|
3148 |
try: |
|
3149 |
return (isinstance(source._format, (RepositoryFormat5, |
|
3150 |
RepositoryFormat6, |
|
3151 |
RepositoryFormat7)) and |
|
3152 |
isinstance(target._format, (RepositoryFormat5, |
|
3153 |
RepositoryFormat6, |
|
3154 |
RepositoryFormat7))) |
|
3155 |
except AttributeError: |
|
3156 |
return False |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3157 |
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3158 |
@needs_write_lock
|
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
3159 |
def copy_content(self, revision_id=None): |
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3160 |
"""See InterRepository.copy_content().""" |
3161 |
# weave specific optimised path:
|
|
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
3162 |
try: |
3163 |
self.target.set_make_working_trees(self.source.make_working_trees()) |
|
3349.1.2
by Aaron Bentley
Change ValueError to RepositoryUpgradeRequired |
3164 |
except (errors.RepositoryUpgradeRequired, NotImplementedError): |
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
3165 |
pass
|
3166 |
# FIXME do not peek!
|
|
3407.2.14
by Martin Pool
Remove more cases of getting transport via control_files |
3167 |
if self.source._transport.listable(): |
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
3168 |
pb = ui.ui_factory.nested_progress_bar() |
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3169 |
try: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3170 |
self.target.texts.insert_record_stream( |
3171 |
self.source.texts.get_record_stream( |
|
3172 |
self.source.texts.keys(), 'topological', False)) |
|
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
3173 |
pb.update('copying inventory', 0, 1) |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3174 |
self.target.inventories.insert_record_stream( |
3175 |
self.source.inventories.get_record_stream( |
|
3176 |
self.source.inventories.keys(), 'topological', False)) |
|
3177 |
self.target.signatures.insert_record_stream( |
|
3178 |
self.source.signatures.get_record_stream( |
|
3179 |
self.source.signatures.keys(), |
|
3180 |
'unordered', True)) |
|
3181 |
self.target.revisions.insert_record_stream( |
|
3182 |
self.source.revisions.get_record_stream( |
|
3183 |
self.source.revisions.keys(), |
|
3184 |
'topological', True)) |
|
2387.1.1
by Robert Collins
Remove the --basis parameter to clone etc. (Robert Collins) |
3185 |
finally: |
3186 |
pb.finished() |
|
3187 |
else: |
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3188 |
self.target.fetch(self.source, revision_id=revision_id) |
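The optimised path streams texts, then inventories, then signatures, and inserts revisions last, so a partially completed copy never advertises a revision whose supporting data is absent. The sketch below mimics only the record-stream shape with a dict-backed toy store; it is not bzrlib's VersionedFiles API.

    # Toy store with the same get/insert record-stream shape.
    class ToyStore(object):
        def __init__(self, records=None):
            self._records = dict(records or {})

        def keys(self):
            return set(self._records)

        def get_record_stream(self, keys):
            for key in keys:
                yield (key, self._records[key])

        def insert_record_stream(self, stream):
            for key, value in stream:
                self._records[key] = value

    source_texts = ToyStore({('file-1', 'r1'): 'hello\n'})
    target_texts = ToyStore()
    # Copy everything the source has, streaming record by record.
    target_texts.insert_record_stream(
        source_texts.get_record_stream(source_texts.keys()))
    print(sorted(target_texts.keys()))   # [('file-1', 'r1')]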
3189 |
||
3190 |
@needs_read_lock
|
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3191 |
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True): |
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3192 |
"""See InterRepository.missing_revision_ids().""" |
3193 |
# we want all revisions to satisfy revision_id in source.
|
|
3194 |
# but we don't want to stat every file here and there.
|
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3195 |
# we want, then, all revisions the other repository needs to satisfy revision_id
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3196 |
# checked, but not those that we have locally.
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3197 |
# so the first thing is to get a subset of the revisions to
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3198 |
# satisfy revision_id in source, and then eliminate those that
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3199 |
# we do already have.
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3200 |
# this is slow on a high latency connection to self, but as this
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3201 |
# disk format scales terribly for push anyway due to rewriting
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3202 |
# inventory.weave, this is considered acceptable.
|
3203 |
# - RBC 20060209
|
|
3204 |
if revision_id is not None: |
|
3205 |
source_ids = self.source.get_ancestry(revision_id) |
|
3376.2.4
by Martin Pool
Remove every assert statement from bzrlib! |
3206 |
if source_ids[0] is not None: |
3207 |
raise AssertionError() |
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3208 |
source_ids.pop(0) |
3209 |
else: |
|
3210 |
source_ids = self.source._all_possible_ids() |
|
3211 |
source_ids_set = set(source_ids) |
|
3212 |
# source_ids is the worst possible case we may need to pull.
|
|
3213 |
# now we want to filter source_ids against what we actually
|
|
3214 |
# have in target, but don't try to check for existence where we know
|
|
3215 |
# we do not have a revision as that would be pointless.
|
|
3216 |
target_ids = set(self.target._all_possible_ids()) |
|
3217 |
possibly_present_revisions = target_ids.intersection(source_ids_set) |
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3218 |
actually_present_revisions = set( |
3219 |
self.target._eliminate_revisions_not_present(possibly_present_revisions)) |
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3220 |
required_revisions = source_ids_set.difference(actually_present_revisions) |
3221 |
if revision_id is not None: |
|
3222 |
# since we used get_ancestry to determine source_ids, we are assured all
|
|
3223 |
# revisions referenced are present as they are installed in topological order.
|
|
3224 |
# and the tip revision was validated by get_ancestry.
|
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3225 |
result_set = required_revisions |
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3226 |
else: |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3227 |
# if we just grabbed the possibly available ids, then
|
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3228 |
# we only have an estimate of what's available and need to validate
|
3229 |
# that against the revision records.
|
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3230 |
result_set = set( |
3231 |
self.source._eliminate_revisions_not_present(required_revisions)) |
|
3184.1.9
by Robert Collins
* ``Repository.get_data_stream`` is now deprecated in favour of |
3232 |
return self.source.revision_ids_to_search_result(result_set) |
2241.1.12
by Martin Pool
Restore InterWeaveRepo |
3233 |
|
3234 |
||
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3235 |
class InterKnitRepo(InterSameDataRepository): |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3236 |
"""Optimised code paths between Knit based repositories.""" |
3237 |
||
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
3238 |
@classmethod
|
2241.1.7
by Martin Pool
rename method |
3239 |
def _get_repo_format_to_test(self): |
2241.1.6
by Martin Pool
Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and |
3240 |
from bzrlib.repofmt import knitrepo |
3241 |
return knitrepo.RepositoryFormatKnit1() |
|
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3242 |
|
3243 |
@staticmethod
|
|
3244 |
def is_compatible(source, target): |
|
3245 |
"""Be compatible with known Knit formats. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3246 |
|
1759.2.2
by Jelmer Vernooij
Revert some of my spelling fixes and fix some typos after review by Aaron. |
3247 |
We don't test for the stores being of specific types because that
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3248 |
could lead to confusing results, and there is no need to be
|
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3249 |
overly general.
|
3250 |
"""
|
|
2592.3.28
by Robert Collins
Make InterKnitOptimiser be used between any same-model knit repository. |
3251 |
from bzrlib.repofmt.knitrepo import RepositoryFormatKnit |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3252 |
try: |
2592.3.28
by Robert Collins
Make InterKnitOptimiser be used between any same-model knit repository. |
3253 |
are_knits = (isinstance(source._format, RepositoryFormatKnit) and |
3254 |
isinstance(target._format, RepositoryFormatKnit)) |
|
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3255 |
except AttributeError: |
3256 |
return False |
|
2592.3.28
by Robert Collins
Make InterKnitOptimiser be used between any same-model knit repository. |
3257 |
return are_knits and InterRepository._same_model(source, target) |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3258 |
|
3259 |
@needs_read_lock
|
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3260 |
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True): |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3261 |
"""See InterRepository.missing_revision_ids().""" |
3262 |
if revision_id is not None: |
|
3263 |
source_ids = self.source.get_ancestry(revision_id) |
|
3376.2.4
by Martin Pool
Remove every assert statement from bzrlib! |
3264 |
if source_ids[0] is not None: |
3265 |
raise AssertionError() |
|
1668.1.14
by Martin Pool
merge olaf - InvalidRevisionId fixes |
3266 |
source_ids.pop(0) |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3267 |
else: |
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
3268 |
source_ids = self.source.all_revision_ids() |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3269 |
source_ids_set = set(source_ids) |
3270 |
# source_ids is the worst possible case we may need to pull.
|
|
3271 |
# now we want to filter source_ids against what we actually
|
|
1759.2.2
by Jelmer Vernooij
Revert some of my spelling fixes and fix some typos after review by Aaron. |
3272 |
# have in target, but don't try to check for existence where we know
|
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3273 |
# we do not have a revision as that would be pointless.
|
2850.3.1
by Robert Collins
Move various weave specific code out of the base Repository class to weaverepo.py. |
3274 |
target_ids = set(self.target.all_revision_ids()) |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3275 |
possibly_present_revisions = target_ids.intersection(source_ids_set) |
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3276 |
actually_present_revisions = set( |
3277 |
self.target._eliminate_revisions_not_present(possibly_present_revisions)) |
|
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3278 |
required_revisions = source_ids_set.difference(actually_present_revisions) |
3279 |
if revision_id is not None: |
|
3280 |
# since we used get_ancestry to determine source_ids, we are assured all
|
|
3281 |
# revisions referenced are present as they are installed in topological order.
|
|
3282 |
# and the tip revision was validated by get_ancestry.
|
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3283 |
result_set = required_revisions |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3284 |
else: |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3285 |
# if we just grabbed the possibly available ids, then
|
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3286 |
# we only have an estimate of what's available and need to validate
|
3287 |
# that against the revision records.
|
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3288 |
result_set = set( |
3289 |
self.source._eliminate_revisions_not_present(required_revisions)) |
|
3184.1.9
by Robert Collins
* ``Repository.get_data_stream`` is now deprecated in favour of |
3290 |
return self.source.revision_ids_to_search_result(result_set) |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3291 |
|
1910.2.17
by Aaron Bentley
Get fetching from 1 to 2 under test |
3292 |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3293 |
class InterPackRepo(InterSameDataRepository): |
3294 |
"""Optimised code paths between Pack based repositories.""" |
|
3295 |
||
3296 |
@classmethod
|
|
3297 |
def _get_repo_format_to_test(self): |
|
3298 |
from bzrlib.repofmt import pack_repo |
|
2592.3.224
by Martin Pool
Rename GraphKnitRepository etc to KnitPackRepository |
3299 |
return pack_repo.RepositoryFormatKnitPack1() |
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3300 |
|
3301 |
@staticmethod
|
|
3302 |
def is_compatible(source, target): |
|
3303 |
"""Be compatible with known Pack formats. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3304 |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3305 |
We don't test for the stores being of specific types because that
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3306 |
could lead to confusing results, and there is no need to be
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3307 |
overly general.
|
3308 |
"""
|
|
3309 |
from bzrlib.repofmt.pack_repo import RepositoryFormatPack |
|
3310 |
try: |
|
2592.3.91
by Robert Collins
Incrementally closing in on a correct fetch for packs. |
3311 |
are_packs = (isinstance(source._format, RepositoryFormatPack) and |
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3312 |
isinstance(target._format, RepositoryFormatPack)) |
3313 |
except AttributeError: |
|
3314 |
return False |
|
2592.3.91
by Robert Collins
Incrementally closing in on a correct fetch for packs. |
3315 |
return are_packs and InterRepository._same_model(source, target) |
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3316 |
|
3317 |
@needs_write_lock
|
|
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
3318 |
def fetch(self, revision_id=None, pb=None, find_ghosts=False, |
3319 |
fetch_spec=None): |
|
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3320 |
"""See InterRepository.fetch().""" |
3582.2.1
by Jonathan Lange
Fix up problems with fetching revisions. Almost entirely abentley's work. |
3321 |
if (len(self.source._fallback_repositories) > 0 or |
3322 |
len(self.target._fallback_repositories) > 0): |
|
3582.1.13
by Martin Pool
Better comment about fetching to or from stacked repositories |
3323 |
# The pack layer is not aware of fallback repositories, so when
|
3324 |
# fetching from a stacked repository or into a stacked repository
|
|
3325 |
# we use the generic fetch logic which uses the VersionedFiles
|
|
3326 |
# attributes on repository.
|
|
3565.3.3
by Robert Collins
* Fetching data between repositories that have the same model but no |
3327 |
from bzrlib.fetch import RepoFetcher |
3328 |
fetcher = RepoFetcher(self.target, self.source, revision_id, |
|
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
3329 |
pb, find_ghosts, fetch_spec=fetch_spec) |
3330 |
if fetch_spec is not None: |
|
4070.9.16
by Andrew Bennetts
Fix small error introduced when doing review tweaks. |
3331 |
if len(list(fetch_spec.heads)) != 1: |
3332 |
raise AssertionError( |
|
3333 |
"InterPackRepo.fetch doesn't support "
|
|
3334 |
"fetching multiple heads yet.") |
|
4152.1.2
by Robert Collins
Add streaming from a stacked branch when the sort order is compatible with doing so. |
3335 |
revision_id = list(fetch_spec.heads)[0] |
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
3336 |
fetch_spec = None |
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
3337 |
if revision_id is None: |
3338 |
# TODO:
|
|
3339 |
# everything to do - use pack logic
|
|
3340 |
# to fetch from all packs to one without
|
|
2592.3.93
by Robert Collins
Steps toward filtering revisions/inventories/texts during fetch. |
3341 |
# inventory parsing etc, IFF nothing to be copied is in the target.
|
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
3342 |
# till then:
|
3221.13.2
by Robert Collins
Add a shallow parameter to bzrdir.sprout, which involved fixing a lateny bug in pack to pack fetching with ghost discovery. |
3343 |
source_revision_ids = frozenset(self.source.all_revision_ids()) |
3344 |
revision_ids = source_revision_ids - \ |
|
4144.3.12
by Andrew Bennetts
Remove target_get_graph and target_get_parent_map attributes from InterRepository; nothing overrides them anymore. |
3345 |
frozenset(self.target.get_parent_map(source_revision_ids)) |
1551.19.36
by Aaron Bentley
Prevent fetch all from causing pack collisions |
3346 |
revision_keys = [(revid,) for revid in revision_ids] |
4144.3.11
by Andrew Bennetts
Remove InterPackToRemotePack too. |
3347 |
index = self.target._pack_collection.revision_index.combined_index |
1551.19.36
by Aaron Bentley
Prevent fetch all from causing pack collisions |
3348 |
present_revision_ids = set(item[1][0] for item in |
3349 |
index.iter_entries(revision_keys)) |
|
3350 |
revision_ids = set(revision_ids) - present_revision_ids |
|
2592.3.93
by Robert Collins
Steps toward filtering revisions/inventories/texts during fetch. |
3351 |
# implementing the TODO will involve:
|
3352 |
# - detecting when all of a pack is selected
|
|
3353 |
# - avoiding as much as possible pre-selection, so the
|
|
3354 |
# more-core routines such as create_pack_from_packs can filter in
|
|
3355 |
# a just-in-time fashion. (though having a HEADS list on a
|
|
3356 |
# repository might make this a lot easier, because we could
|
|
3357 |
# sensibly detect 'new revisions' without doing a full index scan.)
|
|
2592.3.91
by Robert Collins
Incrementally closing in on a correct fetch for packs. |
3358 |
elif _mod_revision.is_null(revision_id): |
3359 |
# nothing to do:
|
|
3010.1.5
by Robert Collins
Test that missing_revision_ids handles the case of the source not having the requested revision correctly with and without find_ghosts. |
3360 |
return (0, []) |
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
3361 |
else: |
3362 |
try: |
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3363 |
revision_ids = self.search_missing_revision_ids(revision_id, |
3364 |
find_ghosts=find_ghosts).get_keys() |
|
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
3365 |
except errors.NoSuchRevision: |
3366 |
raise errors.InstallFailed([revision_id]) |
|
1551.19.41
by Aaron Bentley
Accelerate no-op pull |
3367 |
if len(revision_ids) == 0: |
3368 |
return (0, []) |
|
3452.2.2
by Andrew Bennetts
Experimental PackRepository.{check_references,autopack} RPCs. |
3369 |
return self._pack(self.source, self.target, revision_ids) |
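When everything is being fetched, candidate revisions are filtered against the target's combined revision index, whose entries are keyed by 1-tuples; hence the (revid,) wrapping above. A toy illustration of that filtering step (the index here is just a set of key tuples):

    # Pack revision indices are keyed by 1-tuples, so candidate ids are
    # wrapped as (revid,) before the lookup.
    candidate_ids = set(['r2', 'r3', 'r4'])
    revision_keys = [(revid,) for revid in candidate_ids]

    target_index = {('r1',), ('r2',)}        # stand-in for combined_index entries

    present_ids = set(key[0] for key in revision_keys if key in target_index)
    to_copy = candidate_ids - present_ids
    print(sorted(to_copy))                   # ['r3', 'r4']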
3452.2.1
by Andrew Bennetts
An experimental InterRepo for remote packs. |
3370 |
|
3371 |
def _pack(self, source, target, revision_ids): |
|
3372 |
from bzrlib.repofmt.pack_repo import Packer |
|
3373 |
packs = source._pack_collection.all_packs() |
|
4144.3.11
by Andrew Bennetts
Remove InterPackToRemotePack too. |
3374 |
pack = Packer(self.target._pack_collection, packs, '.fetch', |
2951.1.2
by Robert Collins
Partial refactoring of pack_repo to create a Packer object for packing. |
3375 |
revision_ids).pack() |
2592.3.91
by Robert Collins
Incrementally closing in on a correct fetch for packs. |
3376 |
if pack is not None: |
4144.3.11
by Andrew Bennetts
Remove InterPackToRemotePack too. |
3377 |
self.target._pack_collection._save_pack_names() |
3801.1.14
by Andrew Bennetts
Reduce duplication in InterPackToRemotePack. |
3378 |
copied_revs = pack.get_revision_count() |
2592.3.108
by Robert Collins
Autopack after pack to pack fetching too. |
3379 |
# Trigger an autopack. This may duplicate effort as we've just done
|
3380 |
# a pack creation, but for now it is simpler to think about as
|
|
3381 |
# 'upload data, then repack if needed'.
|
|
4144.3.11
by Andrew Bennetts
Remove InterPackToRemotePack too. |
3382 |
self.target._pack_collection.autopack() |
3801.1.14
by Andrew Bennetts
Reduce duplication in InterPackToRemotePack. |
3383 |
return (copied_revs, []) |
2592.3.91
by Robert Collins
Incrementally closing in on a correct fetch for packs. |
3384 |
else: |
3010.1.5
by Robert Collins
Test that missing_revision_ids handles the case of the source not having the requested revision correctly with and without find_ghosts. |
3385 |
return (0, []) |
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3386 |
|
3387 |
@needs_read_lock
|
|
4070.9.3
by Andrew Bennetts
Fix a bug, remove some cruft, reduce some ratchets. |
3388 |
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True): |
2949.1.2
by Robert Collins
* Fetch with pack repositories will no longer read the entire history graph. |
3389 |
"""See InterRepository.missing_revision_ids(). |
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3390 |
|
3172.4.1
by Robert Collins
* Fetching via bzr+ssh will no longer fill ghosts by default (this is |
3391 |
:param find_ghosts: Find ghosts throughout the ancestry of
|
2949.1.2
by Robert Collins
* Fetch with pack repositories will no longer read the entire history graph. |
3392 |
revision_id.
|
3393 |
"""
|
|
4070.9.3
by Andrew Bennetts
Fix a bug, remove some cruft, reduce some ratchets. |
3394 |
if not find_ghosts and revision_id is not None: |
3395 |
return self._walk_to_common_revisions([revision_id]) |
|
2949.1.2
by Robert Collins
* Fetch with pack repositories will no longer read the entire history graph. |
3396 |
elif revision_id is not None: |
3221.13.2
by Robert Collins
Add a shallow parameter to bzrdir.sprout, which involved fixing a lateny bug in pack to pack fetching with ghost discovery. |
3397 |
# Find ghosts: search for revisions pointing from one repository to
|
3221.18.1
by Ian Clatworthy
tweaks by ianc during review |
3398 |
# the other, and vice versa, anywhere in the history of revision_id.
|
4144.3.12
by Andrew Bennetts
Remove target_get_graph and target_get_parent_map attributes from InterRepository; nothing overrides them anymore. |
3399 |
graph = self.target.get_graph(other_repository=self.source) |
3221.13.7
by Ian Clatworthy
ensure NoSuchRevision raised when needed in InterPackRepo |
3400 |
searcher = graph._make_breadth_first_searcher([revision_id]) |
3401 |
found_ids = set() |
|
3402 |
while True: |
|
3403 |
try: |
|
3404 |
next_revs, ghosts = searcher.next_with_ghosts() |
|
3405 |
except StopIteration: |
|
3406 |
break
|
|
3407 |
if revision_id in ghosts: |
|
3408 |
raise errors.NoSuchRevision(self.source, revision_id) |
|
3409 |
found_ids.update(next_revs) |
|
3410 |
found_ids.update(ghosts) |
|
3411 |
found_ids = frozenset(found_ids) |
|
3221.13.2
by Robert Collins
Add a shallow parameter to bzrdir.sprout, which involved fixing a lateny bug in pack to pack fetching with ghost discovery. |
3412 |
# Double query here: should be able to avoid this by changing the
|
3413 |
# graph api further.
|
|
3414 |
result_set = found_ids - frozenset( |
|
4144.3.12
by Andrew Bennetts
Remove target_get_graph and target_get_parent_map attributes from InterRepository; nothing overrides them anymore. |
3415 |
self.target.get_parent_map(found_ids)) |
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3416 |
else: |
2592.3.151
by Robert Collins
Use the revision index, not the inventory index, for missing and fetch operations. |
3417 |
source_ids = self.source.all_revision_ids() |
3221.13.2
by Robert Collins
Add a shallow parameter to bzrdir.sprout, which involved fixing a lateny bug in pack to pack fetching with ghost discovery. |
3418 |
# source_ids is the worst possible case we may need to pull.
|
3419 |
# now we want to filter source_ids against what we actually
|
|
3420 |
# have in target, but don't try to check for existence where we know
|
|
3421 |
# we do not have a revision as that would be pointless.
|
|
3422 |
target_ids = set(self.target.all_revision_ids()) |
|
3423 |
result_set = set(source_ids).difference(target_ids) |
|
3184.1.9
by Robert Collins
* ``Repository.get_data_stream`` is now deprecated in favour of |
3424 |
return self.source.revision_ids_to_search_result(result_set) |
2592.3.88
by Robert Collins
Move Pack repository logic to bzrlib.repofmt.pack_repo. |
3425 |
|
3426 |
||
2996.2.1
by Aaron Bentley
Add KnitRepositoryFormat4 |
3427 |
class InterDifferingSerializer(InterKnitRepo): |
3428 |
||
3429 |
@classmethod
|
|
3430 |
def _get_repo_format_to_test(self): |
|
3431 |
return None |
|
3432 |
||
3433 |
@staticmethod
|
|
3434 |
def is_compatible(source, target): |
|
3435 |
"""Be compatible with Knit2 source and Knit3 target""" |
|
3436 |
if source.supports_rich_root() != target.supports_rich_root(): |
|
3437 |
return False |
|
3438 |
# Ideally, we'd support fetching if the source had no tree references
|
|
3439 |
# even if it supported them...
|
|
3440 |
if (getattr(source._format, 'supports_tree_reference', False) and |
|
3441 |
not getattr(target._format, 'supports_tree_reference', False)): |
|
3442 |
return False |
|
3443 |
return True |
|
3444 |
||
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3445 |
def _get_delta_for_revision(self, tree, parent_ids, basis_id, cache): |
3446 |
"""Get the best delta and base for this revision. |
|
3447 |
||
3448 |
:return: (basis_id, delta)
|
|
3449 |
"""
|
|
3450 |
possible_trees = [(parent_id, cache[parent_id]) |
|
3451 |
for parent_id in parent_ids |
|
3452 |
if parent_id in cache] |
|
3453 |
if len(possible_trees) == 0: |
|
3454 |
# There either aren't any parents, or the parents aren't in the
|
|
3455 |
# cache, so just use the last converted tree
|
|
3456 |
possible_trees.append((basis_id, cache[basis_id])) |
|
3457 |
deltas = [] |
|
3458 |
for basis_id, basis_tree in possible_trees: |
|
3459 |
delta = tree.inventory._make_delta(basis_tree.inventory) |
|
3460 |
deltas.append((len(delta), basis_id, delta)) |
|
3461 |
deltas.sort() |
|
3462 |
return deltas[0][1:] |
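The selection rule is simply "smallest delta wins" among the cached parent trees (falling back to the last converted tree). A toy version of the same scoring, using dictionaries as stand-in inventories:

    # Score each candidate basis by how many entries differ from the new
    # tree, then take the cheapest one.
    def make_delta(basis, new):
        # A delta here is just the list of paths whose content differs.
        return [p for p in set(basis) | set(new) if basis.get(p) != new.get(p)]

    new_tree = {'a': 1, 'b': 2, 'c': 3}
    candidates = {
        'parent-1': {'a': 1, 'b': 2},   # only 'c' would need to change
        'parent-2': {'x': 9},           # nearly everything would change
    }

    deltas = []
    for basis_id, basis_tree in candidates.items():
        delta = make_delta(basis_tree, new_tree)
        deltas.append((len(delta), basis_id, delta))
    deltas.sort()
    best_basis, best_delta = deltas[0][1:]
    print(best_basis)           # parent-1
    print(sorted(best_delta))   # ['c']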
|
3463 |
||
3464 |
def _fetch_batch(self, revision_ids, basis_id, cache): |
|
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3465 |
"""Fetch across a few revisions. |
3466 |
||
3467 |
:param revision_ids: The revisions to copy
|
|
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3468 |
:param basis_id: The revision_id of a tree that must be in cache, used
|
3469 |
as a basis for delta when no other base is available
|
|
3470 |
:param cache: A cache of RevisionTrees that we can use.
|
|
3471 |
:return: The revision_id of the last converted tree. The RevisionTree
|
|
3472 |
for it will be in cache
|
|
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3473 |
"""
|
3474 |
# Walk though all revisions; get inventory deltas, copy referenced
|
|
3475 |
# texts that delta references, insert the delta, revision and
|
|
3476 |
# signature.
|
|
3477 |
text_keys = set() |
|
3478 |
pending_deltas = [] |
|
3479 |
pending_revisions = [] |
|
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3480 |
parent_map = self.source.get_parent_map(revision_ids) |
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3481 |
for tree in self.source.revision_trees(revision_ids): |
3482 |
current_revision_id = tree.get_revision_id() |
|
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3483 |
parent_ids = parent_map.get(current_revision_id, ()) |
3484 |
basis_id, delta = self._get_delta_for_revision(tree, parent_ids, |
|
3485 |
basis_id, cache) |
|
3486 |
# Find text entries that need to be copied
|
|
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3487 |
for old_path, new_path, file_id, entry in delta: |
3488 |
if new_path is not None: |
|
3489 |
if not (new_path or self.target.supports_rich_root()): |
|
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3490 |
# We don't copy the text for the root node unless the
|
3491 |
# target supports_rich_root.
|
|
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3492 |
continue
|
4098.4.1
by Robert Collins
Handle inconsistent inventory data more gracefully at a small performance cost during fetch. |
3493 |
text_keys.add((file_id, entry.revision)) |
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3494 |
revision = self.source.get_revision(current_revision_id) |
3495 |
pending_deltas.append((basis_id, delta, |
|
3496 |
current_revision_id, revision.parent_ids)) |
|
3497 |
pending_revisions.append(revision) |
|
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3498 |
cache[current_revision_id] = tree |
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3499 |
basis_id = current_revision_id |
3500 |
# Copy file texts
|
|
3501 |
from_texts = self.source.texts |
|
3502 |
to_texts = self.target.texts |
|
3503 |
to_texts.insert_record_stream(from_texts.get_record_stream( |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
3504 |
text_keys, self.target._format._fetch_order, |
3505 |
not self.target._format._fetch_uses_deltas)) |
|
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3506 |
# insert deltas
|
3507 |
for delta in pending_deltas: |
|
3508 |
self.target.add_inventory_by_delta(*delta) |
|
3509 |
# insert signatures and revisions
|
|
3510 |
for revision in pending_revisions: |
|
3511 |
try: |
|
3512 |
signature = self.source.get_signature_text( |
|
3513 |
revision.revision_id) |
|
3514 |
self.target.add_signature_text(revision.revision_id, |
|
3515 |
signature) |
|
3516 |
except errors.NoSuchRevision: |
|
3517 |
pass
|
|
3518 |
self.target.add_revision(revision.revision_id, revision) |
|
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3519 |
return basis_id |
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3520 |
|
3521 |
def _fetch_all_revisions(self, revision_ids, pb): |
|
3522 |
"""Fetch everything for the list of revisions. |
|
3523 |
||
3524 |
:param revision_ids: The list of revisions to fetch. Must be in
|
|
3525 |
topological order.
|
|
3526 |
:param pb: A ProgressBar
|
|
3527 |
:return: None
|
|
3528 |
"""
|
|
3529 |
basis_id, basis_tree = self._get_basis(revision_ids[0]) |
|
3530 |
batch_size = 100 |
|
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3531 |
cache = lru_cache.LRUCache(100) |
3532 |
cache[basis_id] = basis_tree |
|
3533 |
del basis_tree # We don't want to hang on to it here |
|
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3534 |
for offset in range(0, len(revision_ids), batch_size): |
3535 |
self.target.start_write_group() |
|
3536 |
try: |
|
3537 |
pb.update('Transferring revisions', offset, |
|
3879.2.13
by John Arbash Meinel
There was a test that asserted we called pb.update() with the last revision. |
3538 |
len(revision_ids)) |
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3539 |
batch = revision_ids[offset:offset+batch_size] |
4017.4.1
by John Arbash Meinel
Change the generic fetch logic to improve delta selection. |
3540 |
basis_id = self._fetch_batch(batch, basis_id, cache) |
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3541 |
except: |
3542 |
self.target.abort_write_group() |
|
3543 |
raise
|
|
3544 |
else: |
|
3545 |
self.target.commit_write_group() |
|
3879.2.13
by John Arbash Meinel
There was a test that asserted we called pb.update() with the last revision. |
3546 |
pb.update('Transferring revisions', len(revision_ids), |
3547 |
len(revision_ids)) |
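Each batch of (at most) 100 revisions is wrapped in its own write group: committed when the batch converts cleanly, aborted on any error so no half-written batch survives. A toy reproduction of that control flow with a stand-in target object:

    # Toy sketch of the batch + write-group control flow: commit each
    # successful batch, abort on any failure so no partial batch is kept.
    class ToyTarget(object):
        def __init__(self):
            self.committed, self._pending = [], None

        def start_write_group(self):
            self._pending = []

        def add(self, rev):
            self._pending.append(rev)

        def commit_write_group(self):
            self.committed.extend(self._pending)
            self._pending = None

        def abort_write_group(self):
            self._pending = None             # throw away the partial batch

    def fetch_all(revision_ids, target, batch_size=2):
        for offset in range(0, len(revision_ids), batch_size):
            batch = revision_ids[offset:offset + batch_size]
            target.start_write_group()
            try:
                for rev in batch:
                    target.add(rev)
            except Exception:
                target.abort_write_group()
                raise
            else:
                target.commit_write_group()

    target = ToyTarget()
    fetch_all(['r1', 'r2', 'r3', 'r4', 'r5'], target)
    print(target.committed)                  # ['r1', 'r2', 'r3', 'r4', 'r5']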
|
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3548 |
|
2996.2.1
by Aaron Bentley
Add KnitRepositoryFormat4 |
3549 |
@needs_write_lock
|
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
3550 |
def fetch(self, revision_id=None, pb=None, find_ghosts=False, |
3551 |
fetch_spec=None): |
|
2996.2.1
by Aaron Bentley
Add KnitRepositoryFormat4 |
3552 |
"""See InterRepository.fetch().""" |
4070.9.2
by Andrew Bennetts
Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations. |
3553 |
if fetch_spec is not None: |
3554 |
raise AssertionError("Not implemented yet...") |
|
3184.1.9
by Robert Collins
* ``Repository.get_data_stream`` is now deprecated in favour of |
3555 |
revision_ids = self.target.search_missing_revision_ids(self.source, |
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3556 |
revision_id, find_ghosts=find_ghosts).get_keys() |
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3557 |
if not revision_ids: |
3558 |
return 0, 0 |
|
3184.1.8
by Robert Collins
* ``InterRepository.missing_revision_ids`` is now deprecated in favour of |
3559 |
revision_ids = tsort.topo_sort( |
3184.1.9
by Robert Collins
* ``Repository.get_data_stream`` is now deprecated in favour of |
3560 |
self.source.get_graph().get_parent_map(revision_ids)) |
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
3561 |
if pb is None: |
3562 |
my_pb = ui.ui_factory.nested_progress_bar() |
|
3563 |
pb = my_pb |
|
3564 |
else: |
|
4110.2.5
by Martin Pool
Deprecate passing pbs in to fetch() |
3565 |
symbol_versioning.warn( |
3566 |
symbol_versioning.deprecated_in((1, 14, 0)) |
|
3567 |
% "pb parameter to fetch()") |
|
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
3568 |
my_pb = None |
3569 |
try: |
|
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3570 |
self._fetch_all_revisions(revision_ids, pb) |
3146.6.1
by Aaron Bentley
InterDifferingSerializer shows a progress bar |
3571 |
finally: |
3572 |
if my_pb is not None: |
|
3573 |
my_pb.finished() |
|
2996.2.1
by Aaron Bentley
Add KnitRepositoryFormat4 |
3574 |
return len(revision_ids), 0 |
3575 |
||
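For callers that only need the revision list fetch() computes, a hedged sketch of the same two steps - ask the target which revisions it is missing, then topologically sort them by their parent map. Both repositories are assumed to be already opened and read-locked:

    from bzrlib import tsort

    def missing_in_topo_order(source, target, revision_id=None):
        # Ask the target which revisions it lacks relative to source.
        search = target.search_missing_revision_ids(source, revision_id)
        revision_ids = search.get_keys()
        # Order parents before children, as _fetch_all_revisions requires.
        parent_map = source.get_graph().get_parent_map(revision_ids)
        return tsort.topo_sort(parent_map)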
3879.2.8
by John Arbash Meinel
Bring in the CHK inter-differing-serializer fetch code. |
3576 |
def _get_basis(self, first_revision_id): |
3577 |
"""Get a revision and tree which exists in the target. |
|
3578 |
||
3579 |
This assumes that first_revision_id is selected for transmission
|
|
3580 |
because all other ancestors are already present. If we can't find an
|
|
3581 |
ancestor we fall back to NULL_REVISION since we know that is safe.
|
|
3582 |
||
3583 |
:return: (basis_id, basis_tree)
|
|
3584 |
"""
|
|
3585 |
first_rev = self.source.get_revision(first_revision_id) |
|
3586 |
try: |
|
3587 |
basis_id = first_rev.parent_ids[0] |
|
3588 |
# only valid as a basis if the target has it
|
|
3589 |
self.target.get_revision(basis_id) |
|
3590 |
# Try to get a basis tree - if it's a ghost it will hit the
|
|
3591 |
# NoSuchRevision case.
|
|
3592 |
basis_tree = self.source.revision_tree(basis_id) |
|
3593 |
except (IndexError, errors.NoSuchRevision): |
|
3594 |
basis_id = _mod_revision.NULL_REVISION |
|
3595 |
basis_tree = self.source.revision_tree(basis_id) |
|
3596 |
return basis_id, basis_tree |
|
3597 |
||
2996.2.1
by Aaron Bentley
Add KnitRepositoryFormat4 |
3598 |
|
3599 |
InterRepository.register_optimiser(InterDifferingSerializer) |
|
1910.2.15
by Aaron Bentley
Back out inter.get changes, make optimizers an ordered list |
3600 |
InterRepository.register_optimiser(InterSameDataRepository) |
2241.1.13
by Martin Pool
Re-register InterWeaveRepo, fix test integration, add test for it |
3601 |
InterRepository.register_optimiser(InterWeaveRepo) |
1563.2.31
by Robert Collins
Convert Knit repositories to use knits. |
3602 |
InterRepository.register_optimiser(InterKnitRepo) |
2592.3.90
by Robert Collins
Slightly broken, but branch and fetch performance is now roughly on par (for bzr.dev) with knits - should be much faster for large repos. |
3603 |
InterRepository.register_optimiser(InterPackRepo) |
1534.1.31
by Robert Collins
Deprecated fetch.fetch and fetch.greedy_fetch for branch.fetch, and move the Repository.fetch internals to InterRepo and InterWeaveRepo. |
3604 |
|
3605 |
||
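The registrations above feed InterRepository.get(), which walks the registered optimisers, returns the first whose is_compatible() accepts the given pair of repositories, and falls back to the generic InterRepository otherwise. A small usage sketch; `repo_a` and `repo_b` are assumed to be repositories you have already opened:

    from bzrlib.repository import InterRepository  # already in scope inside this module

    inter = InterRepository.get(repo_a, repo_b)
    print inter.__class__.__name__   # e.g. 'InterPackRepo' for two pack repositories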
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
3606 |
class CopyConverter(object): |
3607 |
"""A repository conversion tool which just performs a copy of the content. |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3608 |
|
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
3609 |
This is slow but quite reliable.
|
3610 |
"""
|
|
3611 |
||
3612 |
def __init__(self, target_format): |
|
3613 |
"""Create a CopyConverter. |
|
3614 |
||
3615 |
:param target_format: The format the resulting repository should be.
|
|
3616 |
"""
|
|
3617 |
self.target_format = target_format |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3618 |
|
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
3619 |
def convert(self, repo, pb): |
3620 |
"""Perform the conversion of to_convert, giving feedback via pb. |
|
3621 |
||
3622 |
:param repo: The repository to convert.
|
|
3623 |
:param pb: a progress bar to use for progress information.
|
|
3624 |
"""
|
|
3625 |
self.pb = pb |
|
3626 |
self.count = 0 |
|
1596.2.22
by Robert Collins
Fetch changes to use new pb. |
3627 |
self.total = 4 |
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
3628 |
# this is only useful with metadir layouts - separated repo content.
|
3629 |
# trigger an assertion if that is not the case
|
|
3630 |
repo._format.get_format_string() |
|
3631 |
self.repo_dir = repo.bzrdir |
|
3632 |
self.step('Moving repository to repository.backup') |
|
3633 |
self.repo_dir.transport.move('repository', 'repository.backup') |
|
3634 |
backup_transport = self.repo_dir.transport.clone('repository.backup') |
|
1910.2.12
by Aaron Bentley
Implement knit repo format 2 |
3635 |
repo._format.check_conversion_target(self.target_format) |
1556.1.4
by Robert Collins
Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same. |
3636 |
self.source_repo = repo._format.open(self.repo_dir, |
3637 |
_found=True, |
|
3638 |
_override_transport=backup_transport) |
|
3639 |
self.step('Creating new repository') |
|
3640 |
converted = self.target_format.initialize(self.repo_dir, |
|
3641 |
self.source_repo.is_shared()) |
|
3642 |
converted.lock_write() |
|
3643 |
try: |
|
3644 |
self.step('Copying content into repository.') |
|
3645 |
self.source_repo.copy_content_into(converted) |
|
3646 |
finally: |
|
3647 |
converted.unlock() |
|
3648 |
self.step('Deleting old repository content.') |
|
3649 |
self.repo_dir.transport.delete_tree('repository.backup') |
|
3650 |
self.pb.note('repository converted') |
|
3651 |
||
3652 |
def step(self, message): |
|
3653 |
"""Update the pb by a step.""" |
|
3654 |
self.count += 1 |
|
3655 |
self.pb.update(message, self.count, self.total) |
|
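A hedged sketch of driving CopyConverter by hand; in practice `bzr upgrade` invokes it through the bzrdir conversion machinery. The URL and the chosen target format below are placeholders, not values taken from this module:

    from bzrlib import bzrdir, ui
    from bzrlib.repofmt import pack_repo
    from bzrlib.repository import CopyConverter

    a_bzrdir = bzrdir.BzrDir.open('file:///path/to/branch')  # placeholder URL
    repo = a_bzrdir.open_repository()
    pb = ui.ui_factory.nested_progress_bar()
    try:
        # Convert the repository in place to the pack-0.92 format.
        CopyConverter(pack_repo.RepositoryFormatKnitPack1()).convert(repo, pb)
    finally:
        pb.finished()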
1596.1.1
by Martin Pool
Use simple xml unescaping rather than importing xml.sax |
3656 |
|
3657 |
||
1843.2.4
by Aaron Bentley
Switch to John Meinel's _unescape_xml implementation |
3658 |
_unescape_map = { |
3659 |
'apos':"'", |
|
3660 |
'quot':'"', |
|
3661 |
'amp':'&', |
|
3662 |
'lt':'<', |
|
3663 |
'gt':'>' |
|
3664 |
}
|
|
3665 |
||
3666 |
||
3667 |
def _unescaper(match, _map=_unescape_map): |
|
2294.1.2
by John Arbash Meinel
Track down and add tests that all tree.commit() can handle |
3668 |
code = match.group(1) |
3669 |
try: |
|
3670 |
return _map[code] |
|
3671 |
except KeyError: |
|
3672 |
if not code.startswith('#'): |
|
3673 |
raise
|
|
2294.1.10
by John Arbash Meinel
Switch all apis over to utf8 file ids. All tests pass |
3674 |
return unichr(int(code[1:])).encode('utf8') |
1843.2.4
by Aaron Bentley
Switch to John Meinel's _unescape_xml implementation |
3675 |
|
3676 |
||
3677 |
_unescape_re = None |
|
3678 |
||
3679 |
||
1596.1.1
by Martin Pool
Use simple xml unescaping rather than importing xml.sax |
3680 |
def _unescape_xml(data): |
1843.2.4
by Aaron Bentley
Switch to John Meinel's _unescape_xml implementation |
3681 |
"""Unescape predefined XML entities in a string of data.""" |
3682 |
global _unescape_re |
|
3683 |
if _unescape_re is None: |
|
2120.2.1
by John Arbash Meinel
Remove tabs from source files, and add a test to keep it that way. |
3684 |
_unescape_re = re.compile('\&([^;]*);') |
1843.2.4
by Aaron Bentley
Switch to John Meinel's _unescape_xml implementation |
3685 |
return _unescape_re.sub(_unescaper, data) |
2745.6.3
by Aaron Bentley
Implement versionedfile checking for bzr check |
3686 |
|
3687 |
||
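A couple of quick checks for the helper above (Python 2, like the rest of this module): only the five predefined entities plus numeric character references are expanded; any other named entity re-raises the KeyError.

    assert _unescape_xml('&lt;a&gt; &amp; &quot;b&quot;') == '<a> & "b"'
    assert _unescape_xml("&apos;x&apos;") == "'x'"
    assert _unescape_xml('&#65;&#66;') == 'AB'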
3036.1.3
by Robert Collins
Privatise VersionedFileChecker. |
3688 |
class _VersionedFileChecker(object): |
2745.6.47
by Andrew Bennetts
Move check_parents out of VersionedFile. |
3689 |
|
4145.2.1
by Ian Clatworthy
faster check |
3690 |
def __init__(self, repository, text_key_references=None): |
2745.6.47
by Andrew Bennetts
Move check_parents out of VersionedFile. |
3691 |
self.repository = repository |
4145.2.1
by Ian Clatworthy
faster check |
3692 |
self.text_index = self.repository._generate_text_key_index( |
3693 |
text_key_references=text_key_references) |
|
3943.8.1
by Marius Kruger
remove all trailing whitespace from bzr source |
3694 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3695 |
def calculate_file_version_parents(self, text_key): |
2927.2.10
by Andrew Bennetts
More docstrings, elaborate a comment with an XXX, and remove a little bit of cruft. |
3696 |
"""Calculate the correct parents for a file version according to |
3697 |
the inventories.
|
|
3698 |
"""
|
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3699 |
parent_keys = self.text_index[text_key] |
2988.1.8
by Robert Collins
Change check and reconcile to use the new _generate_text_key_index rather |
3700 |
if parent_keys == [_mod_revision.NULL_REVISION]: |
3701 |
return () |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3702 |
return tuple(parent_keys) |
2745.6.47
by Andrew Bennetts
Move check_parents out of VersionedFile. |
3703 |
|
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3704 |
def check_file_version_parents(self, texts, progress_bar=None): |
2927.2.10
by Andrew Bennetts
More docstrings, elaborate a comment with an XXX, and remove a little bit of cruft. |
3705 |
"""Check the parents stored in a versioned file are correct. |
3706 |
||
3707 |
It also detects file versions that are not referenced by their
|
|
3708 |
corresponding revision's inventory.
|
|
3709 |
||
2927.2.14
by Andrew Bennetts
Tweaks suggested by review. |
3710 |
:returns: A tuple of (wrong_parents, dangling_file_versions).
|
2927.2.10
by Andrew Bennetts
More docstrings, elaborate a comment with an XXX, and remove a little bit of cruft. |
3711 |
wrong_parents is a dict mapping {revision_id: (stored_parents,
|
3712 |
correct_parents)} for each revision_id where the stored parents
|
|
2927.2.14
by Andrew Bennetts
Tweaks suggested by review. |
3713 |
are not correct. dangling_file_versions is a set of (file_id,
|
3714 |
revision_id) tuples for versions that are present in this versioned
|
|
3715 |
file, but not used by the corresponding inventory.
|
|
2927.2.10
by Andrew Bennetts
More docstrings, elaborate a comment with an XXX, and remove a little bit of cruft. |
3716 |
"""
|
2927.2.3
by Andrew Bennetts
Add fulltexts to avoid bug 155730. |
3717 |
wrong_parents = {} |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3718 |
self.file_ids = set([file_id for file_id, _ in |
3719 |
self.text_index.iterkeys()]) |
|
3720 |
# text keys are now grouped by file_id
|
|
3721 |
n_weaves = len(self.file_ids) |
|
3722 |
files_in_revisions = {} |
|
3723 |
revisions_of_files = {} |
|
3724 |
n_versions = len(self.text_index) |
|
3725 |
if progress_bar is not None: progress_bar.update('loading text store', 0, n_versions) |
|
3726 |
parent_map = self.repository.texts.get_parent_map(self.text_index) |
|
3727 |
# On unlistable transports this could well be empty/error...
|
|
3728 |
text_keys = self.repository.texts.keys() |
|
3729 |
unused_keys = frozenset(text_keys) - set(self.text_index) |
|
3730 |
for num, key in enumerate(self.text_index.iterkeys()): |
|
3731 |
if progress_bar is not None: |
|
3732 |
progress_bar.update('checking text graph', num, n_versions) |
|
3733 |
correct_parents = self.calculate_file_version_parents(key) |
|
2927.2.6
by Andrew Bennetts
Make some more check tests pass. |
3734 |
try: |
3350.6.4
by Robert Collins
First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores. |
3735 |
knit_parents = parent_map[key] |
3736 |
except errors.RevisionNotPresent: |
|
3737 |
# Missing text!
|
|
3738 |
knit_parents = None |
|
3739 |
if correct_parents != knit_parents: |
|
3740 |
wrong_parents[key] = (knit_parents, correct_parents) |
|
3741 |
return wrong_parents, unused_keys |
|
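A hedged sketch of running the checker above over a repository's text graph, roughly as the check machinery does; `repo` is assumed to be an already opened, read-locked bzrlib repository:

    from bzrlib import ui

    pb = ui.ui_factory.nested_progress_bar()
    try:
        checker = _VersionedFileChecker(repo)
        wrong_parents, unused_keys = checker.check_file_version_parents(
            repo.texts, progress_bar=pb)
    finally:
        pb.finished()
    for key, (stored, correct) in wrong_parents.iteritems():
        print 'bad parents for %r: stored %r, expected %r' % (key, stored, correct)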
3287.6.8
by Robert Collins
Reduce code duplication as per review. |
3742 |
|
3743 |
||
3744 |
def _old_get_graph(repository, revision_id): |
|
3745 |
"""DO NOT USE. That is all. I'm serious.""" |
|
3746 |
graph = repository.get_graph() |
|
3747 |
revision_graph = dict(((key, value) for key, value in |
|
3748 |
graph.iter_ancestry([revision_id]) if value is not None)) |
|
3749 |
return _strip_NULL_ghosts(revision_graph) |
|
3750 |
||
3751 |
||
3752 |
def _strip_NULL_ghosts(revision_graph): |
|
3753 |
"""Also don't use this. more compatibility code for unmigrated clients.""" |
|
3754 |
# Filter ghosts, and null:
|
|
3755 |
if _mod_revision.NULL_REVISION in revision_graph: |
|
3756 |
del revision_graph[_mod_revision.NULL_REVISION] |
|
3757 |
for key, parents in revision_graph.items(): |
|
3758 |
revision_graph[key] = tuple(parent for parent in parents if parent |
|
3759 |
in revision_graph) |
|
3760 |
return revision_graph |
|
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
3761 |
|
3762 |
||
3763 |
class StreamSink(object): |
|
3764 |
"""An object that can insert a stream into a repository. |
|
3765 |
||
3766 |
This interface handles the complexity of reserialising inventories and
|
|
3767 |
revisions from different formats, and allows unidirectional insertion into
|
|
3768 |
stacked repositories without looking for the missing basis parents
|
|
3769 |
beforehand.
|
|
3770 |
"""
|
|
3771 |
||
3772 |
def __init__(self, target_repo): |
|
3773 |
self.target_repo = target_repo |
|
3774 |
||
4032.3.7
by Robert Collins
Move write locking and write group responsibilities into the Sink objects themselves, allowing complete avoidance of unnecessary calls when the sink is a RemoteSink. |
3775 |
def insert_stream(self, stream, src_format, resume_tokens): |
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
3776 |
"""Insert a stream's content into the target repository. |
3777 |
||
3778 |
:param src_format: a bzr repository format.
|
|
3779 |
||
4032.3.7
by Robert Collins
Move write locking and write group responsibilities into the Sink objects themselves, allowing complete avoidance of unnecessary calls when the sink is a RemoteSink. |
3780 |
:return: a list of resume tokens and an iterable of keys for additional
|
3781 |
items required before the insertion can be completed.
|
|
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
3782 |
"""
|
4032.3.7
by Robert Collins
Move write locking and write group responsibilities into the Sink objects themselves, allowing complete avoidance of unnecessary calls when the sink is a RemoteSink. |
3783 |
self.target_repo.lock_write() |
3784 |
try: |
|
3785 |
if resume_tokens: |
|
3786 |
self.target_repo.resume_write_group(resume_tokens) |
|
3787 |
else: |
|
3788 |
self.target_repo.start_write_group() |
|
3789 |
try: |
|
3790 |
# locked_insert_stream performs a commit|suspend.
|
|
3791 |
return self._locked_insert_stream(stream, src_format) |
|
3792 |
except: |
|
3793 |
self.target_repo.abort_write_group(suppress_errors=True) |
|
3794 |
raise
|
|
3795 |
finally: |
|
3796 |
self.target_repo.unlock() |
|
3797 |
||
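A hedged sketch of the calling convention the docstring above implies: insert the stream once, and if the sink reports missing compression parents, send just those keys in a second pass using the resume tokens. `source` is assumed to be a StreamSource, `sink` a StreamSink, and `search` a search result naming the revisions to fetch:

    resume_tokens, missing_keys = sink.insert_stream(
        source.get_stream(search), source.from_repository._format, [])
    if missing_keys:
        filler = source.get_stream_for_missing_keys(missing_keys)
        resume_tokens, missing_keys = sink.insert_stream(
            filler, source.from_repository._format, resume_tokens)
        if missing_keys:
            raise AssertionError('second pass still missing %r' % (missing_keys,))
    sink.finished()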
3798 |
def _locked_insert_stream(self, stream, src_format): |
|
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
3799 |
to_serializer = self.target_repo._format._serializer |
3800 |
src_serializer = src_format._serializer |
|
4187.3.2
by Andrew Bennetts
Only enable the hack when the serializers match, otherwise we cause ShortReadvErrors. |
3801 |
if to_serializer == src_serializer: |
3802 |
# If serializers match and the target is a pack repository, set the
|
|
3803 |
# write cache size on the new pack. This avoids poor performance
|
|
3804 |
# on transports where append is unbuffered (such as
|
|
4187.3.4
by Andrew Bennetts
Better docstrings and comments. |
3805 |
# RemoteTransport). This is safe to do because nothing should read
|
4187.3.2
by Andrew Bennetts
Only enable the hack when the serializers match, otherwise we cause ShortReadvErrors. |
3806 |
# back from the target repository while a stream with matching
|
3807 |
# serialization is being inserted.
|
|
4187.3.4
by Andrew Bennetts
Better docstrings and comments. |
3808 |
# The exception is that a delta record from the source that should
|
3809 |
# be a fulltext may need to be expanded by the target (see
|
|
3810 |
# test_fetch_revisions_with_deltas_into_pack); but we take care to
|
|
3811 |
# explicitly flush any buffered writes first in that rare case.
|
|
4187.3.2
by Andrew Bennetts
Only enable the hack when the serializers match, otherwise we cause ShortReadvErrors. |
3812 |
try: |
3813 |
new_pack = self.target_repo._pack_collection._new_pack |
|
3814 |
except AttributeError: |
|
3815 |
# Not a pack repository
|
|
3816 |
pass
|
|
3817 |
else: |
|
3818 |
new_pack.set_write_cache_size(1024*1024) |
|
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
3819 |
for substream_type, substream in stream: |
3820 |
if substream_type == 'texts': |
|
3821 |
self.target_repo.texts.insert_record_stream(substream) |
|
3822 |
elif substream_type == 'inventories': |
|
3823 |
if src_serializer == to_serializer: |
|
3824 |
self.target_repo.inventories.insert_record_stream( |
|
3825 |
substream) |
|
3826 |
else: |
|
3827 |
self._extract_and_insert_inventories( |
|
3828 |
substream, src_serializer) |
|
3829 |
elif substream_type == 'revisions': |
|
3830 |
# This may fallback to extract-and-insert more often than
|
|
3831 |
# required if the serializers are different only in terms of
|
|
3832 |
# the inventory.
|
|
3833 |
if src_serializer == to_serializer: |
|
3834 |
self.target_repo.revisions.insert_record_stream( |
|
3835 |
substream) |
|
3836 |
else: |
|
3837 |
self._extract_and_insert_revisions(substream, |
|
3838 |
src_serializer) |
|
3839 |
elif substream_type == 'signatures': |
|
3840 |
self.target_repo.signatures.insert_record_stream(substream) |
|
3841 |
else: |
|
3842 |
raise AssertionError('kaboom! %s' % (substream_type,)) |
|
4032.3.7
by Robert Collins
Move write locking and write group responsibilities into the Sink objects themselves, allowing complete avoidance of unnecessary calls when the sink is a RemoteSink. |
3843 |
try: |
3844 |
missing_keys = set() |
|
3845 |
for prefix, versioned_file in ( |
|
3846 |
('texts', self.target_repo.texts), |
|
3847 |
('inventories', self.target_repo.inventories), |
|
3848 |
('revisions', self.target_repo.revisions), |
|
3849 |
('signatures', self.target_repo.signatures), |
|
3850 |
):
|
|
3851 |
missing_keys.update((prefix,) + key for key in |
|
3852 |
versioned_file.get_missing_compression_parent_keys()) |
|
3853 |
except NotImplementedError: |
|
3854 |
# cannot even attempt suspending, and missing would have failed
|
|
3855 |
# during stream insertion.
|
|
3856 |
missing_keys = set() |
|
3857 |
else: |
|
3858 |
if missing_keys: |
|
3859 |
# suspend the write group and tell the caller what is
|
|
3860 |
# missing. We know we can suspend or else we would not have
|
|
3861 |
# entered this code path. (All repositories that can handle
|
|
3862 |
# missing keys can handle suspending a write group).
|
|
3863 |
write_group_tokens = self.target_repo.suspend_write_group() |
|
3864 |
return write_group_tokens, missing_keys |
|
3865 |
self.target_repo.commit_write_group() |
|
3866 |
return [], set() |
|
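For reference, a minimal sketch of what the `stream` argument consumed above looks like: an iterable of (substream_type, record_stream) pairs, where each record stream comes from the source repository's VersionedFiles. `source_repo`, `text_keys` and `revision_keys` are assumptions:

    def tiny_stream(source_repo, text_keys, revision_keys):
        # Texts in any order; revisions in topological order (parents first).
        yield ('texts', source_repo.texts.get_record_stream(
            text_keys, 'unordered', True))
        yield ('revisions', source_repo.revisions.get_record_stream(
            revision_keys, 'topological', True))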
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
3867 |
|
3868 |
def _extract_and_insert_inventories(self, substream, serializer): |
|
3869 |
"""Generate a new inventory versionedfile in target, converting data. |
|
4032.1.1
by John Arbash Meinel
Merge the removal of all trailing whitespace, and resolve conflicts. |
3870 |
|
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
3871 |
The inventory is retrieved from the source, (deserializing it), and
|
3872 |
stored in the target (reserializing it in a different format).
|
|
3873 |
"""
|
|
3874 |
for record in substream: |
|
3875 |
bytes = record.get_bytes_as('fulltext') |
|
3876 |
revision_id = record.key[0] |
|
3877 |
inv = serializer.read_inventory_from_string(bytes, revision_id) |
|
3878 |
parents = [key[0] for key in record.parents] |
|
3879 |
self.target_repo.add_inventory(revision_id, inv, parents) |
|
3880 |
||
3881 |
def _extract_and_insert_revisions(self, substream, serializer): |
|
3882 |
for record in substream: |
|
3883 |
bytes = record.get_bytes_as('fulltext') |
|
3884 |
revision_id = record.key[0] |
|
3885 |
rev = serializer.read_revision_from_string(bytes) |
|
3886 |
if rev.revision_id != revision_id: |
|
3887 |
raise AssertionError('wtf: %s != %s' % (rev, revision_id)) |
|
3888 |
self.target_repo.add_revision(revision_id, rev) |
|
3889 |
||
3890 |
def finished(self): |
|
4053.1.4
by Robert Collins
Move the fetch control attributes from Repository to RepositoryFormat. |
3891 |
if self.target_repo._format._fetch_reconcile: |
4022.1.1
by Robert Collins
Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts) |
3892 |
self.target_repo.reconcile() |
3893 |
||
4060.1.3
by Robert Collins
Implement the separate source component for fetch - repository.StreamSource. |
3894 |
|
3895 |
class StreamSource(object): |
|
4065.1.2
by Robert Collins
Merge bzr.dev [fix conflicts with fetch refactoring]. |
3896 |
"""A source of a stream for fetching between repositories.""" |
4060.1.3
by Robert Collins
Implement the separate source component for fetch - repository.StreamSource. |
3897 |
|
3898 |
def __init__(self, from_repository, to_format): |
|
3899 |
"""Create a StreamSource streaming from from_repository.""" |
|
3900 |
self.from_repository = from_repository |
|
3901 |
self.to_format = to_format |
|
3902 |
||
3903 |
def delta_on_metadata(self): |
|
3904 |
"""Return True if delta's are permitted on metadata streams. |
|
3905 |
||
3906 |
That is on revisions and signatures.
|
|
3907 |
"""
|
|
3908 |
src_serializer = self.from_repository._format._serializer |
|
3909 |
target_serializer = self.to_format._serializer |
|
3910 |
return (self.to_format._fetch_uses_deltas and |
|
3911 |
src_serializer == target_serializer) |
|
3912 |
||
3913 |
def _fetch_revision_texts(self, revs): |
|
3914 |
# fetch signatures first and then the revision texts
|
|
3915 |
# may need to be an InterRevisionStore call here.
|
|
3916 |
from_sf = self.from_repository.signatures |
|
3917 |
# A missing signature is just skipped.
|
|
3918 |
keys = [(rev_id,) for rev_id in revs] |
|
4060.1.4
by Robert Collins
Streaming fetch from remote servers. |
3919 |
signatures = versionedfile.filter_absent(from_sf.get_record_stream( |
4060.1.3
by Robert Collins
Implement the separate source component for fetch - repository.StreamSource. |
3920 |
keys, |
3921 |
self.to_format._fetch_order, |
|
3922 |
not self.to_format._fetch_uses_deltas)) |
|
3923 |
# If a revision has a delta, this is actually expanded inside the
|
|
3924 |
# insert_record_stream code now, which is an alternate fix for
|
|
3925 |
# bug #261339
|
|
3926 |
from_rf = self.from_repository.revisions |
|
3927 |
revisions = from_rf.get_record_stream( |
|
3928 |
keys, |
|
3929 |
self.to_format._fetch_order, |
|
3930 |
not self.delta_on_metadata()) |
|
3931 |
return [('signatures', signatures), ('revisions', revisions)] |
|
3932 |
||
3933 |
def _generate_root_texts(self, revs): |
|
3934 |
"""This will be called by __fetch between fetching weave texts and |
|
3935 |
fetching the inventory weave.
|
|
3936 |
||
3937 |
Subclasses should override this if they need to generate root texts
|
|
3938 |
after fetching weave texts.
|
|
3939 |
"""
|
|
3940 |
if self._rich_root_upgrade(): |
|
3941 |
import bzrlib.fetch |
|
3942 |
return bzrlib.fetch.Inter1and2Helper( |
|
3943 |
self.from_repository).generate_root_texts(revs) |
|
3944 |
else: |
|
3945 |
return [] |
|
3946 |
||
3947 |
def get_stream(self, search): |
|
3948 |
phase = 'file' |
|
3949 |
revs = search.get_keys() |
|
3950 |
graph = self.from_repository.get_graph() |
|
3951 |
revs = list(graph.iter_topo_order(revs)) |
|
3952 |
data_to_fetch = self.from_repository.item_keys_introduced_by(revs) |
|
3953 |
text_keys = [] |
|
3954 |
for knit_kind, file_id, revisions in data_to_fetch: |
|
3955 |
if knit_kind != phase: |
|
3956 |
phase = knit_kind |
|
3957 |
# Make a new progress bar for this phase
|
|
3958 |
if knit_kind == "file": |
|
3959 |
# Accumulate file texts
|
|
3960 |
text_keys.extend([(file_id, revision) for revision in |
|
3961 |
revisions]) |
|
3962 |
elif knit_kind == "inventory": |
|
3963 |
# Now copy the file texts.
|
|
3964 |
from_texts = self.from_repository.texts |
|
3965 |
yield ('texts', from_texts.get_record_stream( |
|
3966 |
text_keys, self.to_format._fetch_order, |
|
3967 |
not self.to_format._fetch_uses_deltas)) |
|
3968 |
# Cause an error if a text occurs after we have done the
|
|
3969 |
# copy.
|
|
3970 |
text_keys = None |
|
3971 |
# Before we process the inventory we generate the root
|
|
3972 |
# texts (if necessary) so that the inventories' references
|
|
3973 |
# will be valid.
|
|
3974 |
for _ in self._generate_root_texts(revs): |
|
3975 |
yield _ |
|
3976 |
# NB: This currently reopens the inventory weave in source;
|
|
3977 |
# using a single stream interface instead would avoid this.
|
|
3978 |
from_weave = self.from_repository.inventories |
|
3979 |
# we fetch only the referenced inventories because we do not
|
|
3980 |
# know for unselected inventories whether all their required
|
|
3981 |
# texts are present in the other repository - it could be
|
|
3982 |
# corrupt.
|
|
3983 |
yield ('inventories', from_weave.get_record_stream( |
|
3984 |
[(rev_id,) for rev_id in revs], |
|
3985 |
self.inventory_fetch_order(), |
|
3986 |
not self.delta_on_metadata())) |
|
3987 |
elif knit_kind == "signatures": |
|
3988 |
# Nothing to do here; this will be taken care of when
|
|
3989 |
# _fetch_revision_texts happens.
|
|
3990 |
pass
|
|
3991 |
elif knit_kind == "revisions": |
|
3992 |
for record in self._fetch_revision_texts(revs): |
|
3993 |
yield record |
|
3994 |
else: |
|
3995 |
raise AssertionError("Unknown knit kind %r" % knit_kind) |
|
3996 |
||
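A small hedged sketch of consuming the stream yielded above, just counting records per substream type; a real consumer would hand the pairs to a StreamSink instead. `source` and `search` are assumed to exist as in the earlier sketches:

    counts = {}
    for substream_type, substream in source.get_stream(search):
        counts[substream_type] = counts.get(substream_type, 0) + sum(
            1 for record in substream)
    print counts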
3997 |
def get_stream_for_missing_keys(self, missing_keys): |
|
3998 |
# missing keys can only occur when we are byte copying and not
|
|
3999 |
# translating (because translation means we don't send
|
|
4000 |
# unreconstructable deltas ever).
|
|
4001 |
keys = {} |
|
4002 |
keys['texts'] = set() |
|
4003 |
keys['revisions'] = set() |
|
4004 |
keys['inventories'] = set() |
|
4005 |
keys['signatures'] = set() |
|
4006 |
for key in missing_keys: |
|
4007 |
keys[key[0]].add(key[1:]) |
|
4008 |
if len(keys['revisions']): |
|
4009 |
# If we allowed copying revisions at this point, we could end up
|
|
4010 |
# copying a revision without copying its required texts: a
|
|
4011 |
# violation of the requirements for repository integrity.
|
|
4012 |
raise AssertionError( |
|
4013 |
'cannot copy revisions to fill in missing deltas %s' % ( |
|
4014 |
keys['revisions'],)) |
|
4015 |
for substream_kind, keys in keys.iteritems(): |
|
4016 |
vf = getattr(self.from_repository, substream_kind) |
|
4017 |
# Ask for full texts always so that we don't need more round trips
|
|
4018 |
# after this stream.
|
|
4019 |
stream = vf.get_record_stream(keys, |
|
4020 |
self.to_format._fetch_order, True) |
|
4021 |
yield substream_kind, stream |
|
4022 |
||
4023 |
def inventory_fetch_order(self): |
|
4024 |
if self._rich_root_upgrade(): |
|
4025 |
return 'topological' |
|
4026 |
else: |
|
4027 |
return self.to_format._fetch_order |
|
4028 |
||
4029 |
def _rich_root_upgrade(self): |
|
4030 |
return (not self.from_repository._format.rich_root_data and |
|
4031 |
self.to_format.rich_root_data) |
|
4032 |