that has merged into it. As the first step of a merge, pull, or
branch operation we copy history from the source into the destination
branch.

The copying is done in a slightly complicated order. We don't want to
add a revision to the store until everything it refers to is also
stored, so that if a revision is present we can totally recreate it.
However, we can't know what files are included in a revision until we
read its inventory. So we query the inventory store of the source for
the ids we need, and then pull those ids and finally actually join
the inventories.
"""
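
# Callers normally do not build a fetcher themselves; the usual entry point
# is Repository.fetch (via InterRepository.fetch). A minimal sketch, assuming
# two already-open repository objects:
#
#   to_repo.lock_write()
#   try:
#       to_repo.fetch(from_repo, revision_id=last_revision)
#   finally:
#       to_repo.unlock()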

import operator

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import (
    tsort,
    versionedfile,
    )
""")
import bzrlib
import bzrlib.errors as errors
import bzrlib.ui
from bzrlib import ui
from bzrlib.errors import InstallFailed
from bzrlib.progress import ProgressPhase
from bzrlib.revision import is_null, NULL_REVISION
from bzrlib.symbol_versioning import (deprecated_function,
        )
from bzrlib.trace import mutter

# TODO: Avoid repeatedly opening weaves so many times.

# XXX: This doesn't handle ghost (not present in branch) revisions at
# all yet. I'm not sure they really should be supported.

# NOTE: This doesn't copy revisions which may be present but not
# merged into the last revision. I'm not sure we want to do that.

# - get a list of revisions that need to be pulled in
# - for each one, pull in that revision file
#   and get the inventory, and store the inventory with right
#   parents.
# - and get the ancestry, and store that with right parents too
# - and keep a note of all file ids and version seen
# - then go through all files; for each one get the weave,
#   and add in all file versions
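
# In the code below this plan corresponds to the knit_kind loop in
# _fetch_everything_for_search: "file" texts are copied first, then the
# inventories (with root texts generated first for rich-root upgrades),
# then signatures and revision texts last.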


class RepoFetcher(object):
    """Pull revisions and texts from one repository to another.

    last_revision
        if set, try to limit to the data this revision references.

    count_copied -- number of revisions copied

    This should not be used directly, it's essentially an object to encapsulate
    the logic in InterRepository.fetch().
    """

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None, find_ghosts=True, fetch_spec=None):
        """Create a repo fetcher.

        :param last_revision: If set, try to limit to the data this revision
            references.
        :param find_ghosts: If True search the entire history for ghosts.
        """
        # repository.fetch has the responsibility for short-circuiting
        # attempts to copy between a repository and itself.
        self.failed_revisions = []
        self.count_copied = 0
        if to_repository.has_same_location(from_repository):
            # repository.fetch should be taking care of this case.
            raise errors.BzrError('RepoFetcher run '
                    'between two objects at the same location: '
                    '%r and %r' % (to_repository, from_repository))
        self.to_repository = to_repository
        self.from_repository = from_repository
        self.sink = to_repository._get_sink()
        # must not mutate self._last_revision as it's potentially a shared instance
        self._last_revision = last_revision
        self._fetch_spec = fetch_spec
        self.find_ghosts = find_ghosts
        if pb is None:
            self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
            self.nested_pb = self.pb
        else:
            self.pb = pb
            self.nested_pb = None
        self.from_repository.lock_read()
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.from_repository, self.from_repository._format,
               self.to_repository, self.to_repository._format)
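        # Everything below runs inside a write group on the target
        # repository, so a failed fetch is abandoned via abort_write_group()
        # rather than leaving partially-copied data behind.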
        try:
            self.to_repository.lock_write()
            try:
                self.to_repository.start_write_group()
                try:
                    self.__fetch()
                except:
                    self.to_repository.abort_write_group()
                    raise
                else:
                    self.to_repository.commit_write_group()
            finally:
                if self.nested_pb is not None:
                    self.nested_pb.finished()
                self.to_repository.unlock()
        finally:
            self.from_repository.unlock()

    def __fetch(self):
        """Primary worker function.

        This initialises all the needed variables, and then fetches the
        requested revisions, finally clearing the progress bar.
        """
        # Roughly this is what we're aiming for fetch to become:
        #
        # missing = self.sink.insert_stream(self.source.get_stream(search))
        # if missing:
        #     missing = self.sink.insert_stream(self.source.get_items(missing))
        self.to_weaves = self.to_repository.weave_store
        self.from_weaves = self.from_repository.weave_store
        self.count_total = 0
        self.file_ids_names = {}
        pb = ui.ui_factory.nested_progress_bar()
        pb.show_pct = pb.show_count = False
        pp = ProgressPhase('Transferring', 4, self.pb)
        try:
            pb.update("Finding revisions", 0, 2)
            search = self._revids_to_fetch()
            if search is None:
                return
            pb.update("Fetching revisions", 1, 2)
            if getattr(self, '_fetch_everything_for_search', None) is not None:
                self._fetch_everything_for_search(search, pp)
            else:
                # backward compatibility
                self._fetch_everything_for_revisions(search.get_keys, pp)
        finally:
            pb.finished()
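
    # Two implementations of _fetch_everything_for_search follow: a streaming
    # one that pushes records into the target's sink, and an older one that
    # drives item_keys_introduced_by one knit kind at a time.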

    def _fetch_everything_for_search(self, search):
        """Fetch all data for the given set of revisions."""
        if (self.from_repository._format.rich_root_data and
            not self.to_repository._format.rich_root_data):
            raise errors.IncompatibleRepositories(
                self.from_repository, self.to_repository,
                "different rich-root support")
        pb = ui.ui_factory.nested_progress_bar()
        try:
            pb.update("Get stream source")
            source = self.from_repository._get_source(
                self.to_repository._format)
            stream = source.get_stream(search)
            from_format = self.from_repository._format
            pb.update("Inserting stream")
            resume_tokens, missing_keys = self.sink.insert_stream(
                stream, from_format, [])
            if self.to_repository._fallback_repositories:
                missing_keys.update(
                    self._parent_inventories(search.get_keys()))
            if missing_keys:
                pb.update("Missing keys")
                stream = source.get_stream_for_missing_keys(missing_keys)
                pb.update("Inserting missing keys")
                resume_tokens, missing_keys = self.sink.insert_stream(
                    stream, from_format, resume_tokens)
            if missing_keys:
                raise AssertionError(
                    "second push failed to complete a fetch %r." % (
                        missing_keys,))
            if resume_tokens:
                raise AssertionError(
                    "second push failed to commit the fetch %r." % (
                        resume_tokens,))
            pb.update("Finishing stream")
            self.sink.finished()
        finally:
            pb.finished()

    def _fetch_everything_for_search(self, search, pp):
        # The first phase is "file". We pass the progress bar for it directly
        # into item_keys_introduced_by, which has more information about how
        # that phase is progressing than we do.
        # item_keys_introduced_by should have a richer API than it does at the
        # moment, so that it can feed the progress information back to this
        # function.
        phase = 'file'
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            revs = search.get_keys()
            data_to_fetch = self.from_repository.item_keys_introduced_by(revs, pb)
            for knit_kind, file_id, revisions in data_to_fetch:
                if knit_kind != phase:
                    phase = knit_kind
                    # Make a new progress bar for this phase
                    pb.finished()
                    pp.next_phase()
                    pb = bzrlib.ui.ui_factory.nested_progress_bar()
                if knit_kind == "file":
                    self._fetch_weave_text(file_id, revisions)
                elif knit_kind == "inventory":
                    # Before we process the inventory we generate the root
                    # texts (if necessary) so that the inventories reference
                    # them.
                    self._generate_root_texts(revs)
                    # NB: This currently reopens the inventory weave in source;
                    # using a full get_data_stream instead would avoid this.
                    self._fetch_inventory_weave(revs, pb)
                elif knit_kind == "signatures":
                    # Nothing to do here; this will be taken care of when
                    # _fetch_revision_texts happens.
                    pass
                elif knit_kind == "revisions":
                    self._fetch_revision_texts(revs, pb)
                else:
                    raise AssertionError("Unknown knit kind %r" % knit_kind)
        finally:
            pb.finished()
        self.count_copied += len(revs)
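
    # A rough sketch of the streaming contract used above (assuming the
    # bzrlib 2.x StreamSource/StreamSink API): source.get_stream(search)
    # yields (substream_kind, records) pairs, and sink.insert_stream()
    # returns (resume_tokens, missing_keys) so that a second pass can supply
    # whatever the target could not satisfy from its own or fallback data.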

    def _revids_to_fetch(self):
        """Determines the exact revisions needed from self.from_repository to
        install self._last_revision in self.to_repository.

        If no revisions need to be fetched, then this just returns None.
        """
        if self._fetch_spec is not None:
            return self._fetch_spec
        mutter('fetch up to rev {%s}', self._last_revision)
        if self._last_revision is NULL_REVISION:
            # explicit limit of no revisions needed
            return None
        if (self._last_revision is not None and
            self.to_repository.has_revision(self._last_revision)):
            return None
        try:
            return self.to_repository.search_missing_revision_ids(
                self.from_repository, self._last_revision,
                find_ghosts=self.find_ghosts)
        except errors.NoSuchRevision:
            raise InstallFailed([self._last_revision])

    def _parent_inventories(self, revision_ids):
        # Find all the parent revisions referenced by the stream, but
        # not present in the stream, and make sure we send their
        # inventories.
        parent_maps = self.to_repository.get_parent_map(revision_ids)
        parents = set()
        map(parents.update, parent_maps.itervalues())
        parents.discard(NULL_REVISION)
        parents.difference_update(revision_ids)
        missing_keys = set(('inventories', rev_id) for rev_id in parents)
        return missing_keys
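
    # _parent_inventories matters when the target has fallback (stacked)
    # repositories: the inventories of parents just outside the fetched set
    # are requested as missing keys so the stacked data remains complete.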

    def _fetch_weave_text(self, file_id, required_versions):
        to_weave = self.to_weaves.get_weave_or_empty(file_id,
            self.to_repository.get_transaction())
        from_weave = self.from_weaves.get_weave(file_id,
            self.from_repository.get_transaction())
        # we fetch all the texts, because texts do
        # not reference anything, and it's cheap enough
        to_weave.join(from_weave, version_ids=required_versions)

    def _fetch_inventory_weave(self, revs, pb):
        pb.update("fetch inventory", 0, 2)
        to_weave = self.to_repository.get_inventory_weave()
        child_pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            # just merge, this is optimisable and it means we don't
            # copy unreferenced data such as not-needed inventories.
            pb.update("fetch inventory", 1, 3)
            from_weave = self.from_repository.get_inventory_weave()
            pb.update("fetch inventory", 2, 3)
            # we fetch only the referenced inventories because we do not
            # know for unselected inventories whether all their required
            # texts are present in the other repository - it could be
            # corrupt.
            to_weave.join(from_weave, pb=child_pb, msg='merge inventory',
                          version_ids=revs)
        finally:
            child_pb.finished()

    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        pass


class GenericRepoFetcher(RepoFetcher):
    """This is a generic repo to repo fetcher.

    This makes minimal assumptions about repo layout and contents.
    It triggers a reconciliation after fetching to ensure integrity.
    """

    def _fetch_revision_texts(self, revs, pb):
        """Fetch revision object texts"""
        to_txn = self.to_transaction = self.to_repository.get_transaction()
        count = 0
        total = len(revs)
        to_store = self.to_repository._revision_store
        for rev in revs:
            pb.update('copying revisions', count, total)
            try:
                sig_text = self.from_repository.get_signature_text(rev)
                to_store.add_revision_signature_text(rev, sig_text, to_txn)
            except errors.NoSuchRevision:
                # not signed.
                pass
            to_store.add_revision(self.from_repository.get_revision(rev),
                                  to_txn)
            count += 1
        # fixup inventory if needed:
        # this is expensive because we have no inverse index to current ghosts.
        # but on local disk it's a few seconds and sftp push is already insane.
        # FIXME: repository should inform if this is needed.
        self.to_repository.reconcile()


class KnitRepoFetcher(RepoFetcher):
    """This is a knit format repository specific fetcher.

    This differs from the GenericRepoFetcher by not doing a
    reconciliation after copying, and using knit joining to
    implement the copy.
    """

    def _fetch_revision_texts(self, revs, pb):
        # may need to be a InterRevisionStore call here.
        from_transaction = self.from_repository.get_transaction()
        to_transaction = self.to_repository.get_transaction()
        to_sf = self.to_repository._revision_store.get_signature_file(
            to_transaction)
        from_sf = self.from_repository._revision_store.get_signature_file(
            from_transaction)
        to_sf.join(from_sf, version_ids=revs, ignore_missing=True)
        to_rf = self.to_repository._revision_store.get_revision_file(
            to_transaction)
        from_rf = self.from_repository._revision_store.get_revision_file(
            from_transaction)
        to_rf.join(from_rf, version_ids=revs)
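
# Roughly speaking, "model 1" repositories do not version the tree root
# directory at all, while "model 2" (rich-root / knit2) formats store a root
# entry per revision; converting between them is why root texts have to be
# synthesised by the helpers below.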


class Inter1and2Helper(object):
    """Helper for operations that convert data from model 1 and 2

    This is for use by fetchers and converters.
    """

    def __init__(self, source, target):
        """Constructor.

        :param source: The repository data comes from
        :param target: The repository data goes to
        """
        self.source = source
        self.target = target

    def iter_rev_trees(self, revs):
        """Iterate through RevisionTrees efficiently.

        Trees are retrieved in batches of 100 and yielded in request order.
        """
        revs = list(revs)
        while revs:
            for tree in self.source.revision_trees(revs[:100]):
                if tree.inventory.revision_id is None:
                    tree.inventory.revision_id = tree.get_revision_id()
                yield tree
            revs = revs[100:]

    def _find_root_ids(self, revs, parent_map, graph):
        revision_root = {}
        for tree in self.iter_rev_trees(revs):
            revision_id = tree.inventory.root.revision
            root_id = tree.get_root_id()
            revision_root[revision_id] = root_id
        # Find out which parents we don't already know root ids for
        parents = set()
        for revision_parents in parent_map.itervalues():
            parents.update(revision_parents)
        parents.difference_update(revision_root.keys() + [NULL_REVISION])
        # Limit to revisions present in the versionedfile
        parents = graph.get_parent_map(parents).keys()
        for tree in self.iter_rev_trees(parents):
            root_id = tree.get_root_id()
            revision_root[tree.get_revision_id()] = root_id
        return revision_root
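
    # The revision_id -> root file id map built above lets generate_root_texts
    # work out parent keys for synthesised roots without re-reading every
    # parent tree.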

    def generate_root_texts(self, revs):
        """Generate VersionedFiles for all root ids.

        :param revs: the revisions to include
        """
        graph = self.source.get_graph()
        parent_map = graph.get_parent_map(revs)
        rev_order = tsort.topo_sort(parent_map)
        rev_id_to_root_id = self._find_root_ids(revs, parent_map, graph)
        root_id_order = [(rev_id_to_root_id[rev_id], rev_id) for rev_id in
            rev_order]
        # Guaranteed stable, this groups all the file id operations together
        # retaining topological order within the revisions of a file id.
        # File id splits and joins would invalidate this, but they don't exist
        # yet, and are unlikely to in non-rich-root environments anyway.
        root_id_order.sort(key=operator.itemgetter(0))
        # Create a record stream containing the roots to create.
        # XXX: not covered by tests, should have a flag to always run
        # this. -- mbp 20100129
        graph = self.source.get_known_graph_ancestry(revs)
        new_roots_stream = _new_root_data_stream(
            root_id_order, rev_id_to_root_id, parent_map, self.source, graph)
        return [('texts', new_roots_stream)]


def _get_rich_root_heads_graph(source_repo, revision_ids):
    """Get a Graph object suitable for asking heads() for new rich roots."""


def _new_root_data_stream(
    root_keys_to_create, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Generate a texts substream of synthesised root entries.

    Used in fetches that do rich-root upgrades.

    :param root_keys_to_create: iterable of (root_id, rev_id) pairs describing
        the root entries to create.
    :param rev_id_to_root_id_map: dict of known rev_id -> root_id mappings for
        calculating the parents. If a parent rev_id is not found here then it
        will be recalculated.
    :param parent_map: a parent map for all the revisions in
        root_keys_to_create.
    :param graph: a graph to use instead of repo.get_graph().
    """
    for root_key in root_keys_to_create:
        root_id, rev_id = root_key
        parent_keys = _parent_keys_for_root_version(
            root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph)
        yield versionedfile.FulltextContentFactory(
            root_key, parent_keys, None, '')
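
# A minimal usage sketch (an assumption, not code from this module): with the
# bzrlib 2.x VersionedFiles API, "texts" keys are (file_id, revision_id)
# tuples, so the synthesised roots can be fed straight into the target:
#
#   to_repo.texts.insert_record_stream(
#       _new_root_data_stream(root_keys, {}, parent_map, from_repo))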


def _parent_keys_for_root_version(
    root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Get the parent keys for a given root id.

    A helper function for _new_root_data_stream.
    """
    # Include direct parents of the revision, but only if they used the same
    # root_id and are heads.
    rev_parents = parent_map[rev_id]
    parent_ids = []
    for parent_id in rev_parents:
        if parent_id == NULL_REVISION:
            continue
        if parent_id not in rev_id_to_root_id_map:
            # We probably didn't read this revision, go spend the extra effort
            # to actually check.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # Ghost, fill out rev_id_to_root_id in case we encounter this
                # again.
                # But set parent_root_id to None since we don't really know.
                parent_root_id = None
            else:
                parent_root_id = tree.get_root_id()
            rev_id_to_root_id_map[parent_id] = None
            # XXX: why not:
            #   rev_id_to_root_id_map[parent_id] = parent_root_id
            # memory consumption maybe?
        else:
            parent_root_id = rev_id_to_root_id_map[parent_id]
        if root_id == parent_root_id:
            # With stacking we _might_ want to refer to a non-local revision,
            # but this code path only applies when we have the full content
            # available, so ghosts really are ghosts, not just the edge of
            # local data.
            parent_ids.append(parent_id)
        else:
            # root_id may be in the parent anyway.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # ghost, can't refer to it.
                pass
            else:
                try:
                    parent_ids.append(tree.inventory[root_id].revision)
                except errors.NoSuchId:
                    # not in the tree
                    pass
    # Drop non-head parents
    if graph is None:
        graph = repo.get_graph()
    heads = graph.heads(parent_ids)
    selected_ids = []
    for parent_id in parent_ids:
        if parent_id in heads and parent_id not in selected_ids:
            selected_ids.append(parent_id)
    parent_keys = [(root_id, parent_id) for parent_id in selected_ids]
    return parent_keys

    def generate_root_texts(self, revs):
        inventory_weave = self.source.get_inventory_weave()
        parent_texts = {}
        versionedfile = {}
        to_store = self.target.weave_store
        parent_map = self.source.get_graph().get_parent_map(revs)
        for tree in self.iter_rev_trees(revs):
            revision_id = tree.inventory.root.revision
            root_id = tree.get_root_id()
            parents = parent_map[revision_id]
            if parents[0] == NULL_REVISION:
                parents = ()
            if root_id not in versionedfile:
                versionedfile[root_id] = to_store.get_weave_or_empty(root_id,
                    self.target.get_transaction())
                _, _, parent_texts[root_id] = versionedfile[root_id].add_lines(
                    revision_id, parents, [], parent_texts)

    def regenerate_inventory(self, revs):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        :param revs: The revisions to include
        """
        for tree in self.iter_rev_trees(revs):
            parents = tree.get_parent_ids()
            self.target.add_inventory(tree.get_revision_id(), tree.inventory,
                                      parents)


class Model1toKnit2Fetcher(GenericRepoFetcher):
    """Fetch from a Model1 repository into a Knit2 repository
    """
    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None, find_ghosts=True):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        GenericRepoFetcher.__init__(self, to_repository, from_repository,
                                    last_revision, pb, find_ghosts)

    def _generate_root_texts(self, revs):
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs, pb):
        self.helper.regenerate_inventory(revs)


class Knit1to2Fetcher(KnitRepoFetcher):
    """Fetch from a Knit1 repository into a Knit2 repository"""

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None, find_ghosts=True):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        KnitRepoFetcher.__init__(self, to_repository, from_repository,
                                 last_revision, pb, find_ghosts)

    def _generate_root_texts(self, revs):
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs, pb):
        self.helper.regenerate_inventory(revs)


class RemoteToOtherFetcher(GenericRepoFetcher):

    def _fetch_everything_for_search(self, search, pp):
        data_stream = self.from_repository.get_data_stream_for_search(search)
        self.to_repository.insert_data_stream(data_stream)
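

# Usage sketch (assuming two compatible, already-open repositories): the
# fetchers above do their work in __init__, so instantiating one performs
# the copy.
#
#   Model1toKnit2Fetcher(to_repository=knit2_repo,
#                        from_repository=model1_repo,
#                        last_revision=tip_revision_id)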