that has merged into it.  As the first step of a merge, pull, or
branch operation we copy history from the source into the destination
branch.

The copying is done in a slightly complicated order.  We don't want to
add a revision to the store until everything it refers to is also
stored, so that if a revision is present we can totally recreate it.
However, we can't know what files are included in a revision until we
read its inventory.  So we query the inventory store of the source for
the ids we need, and then pull those ids and finally actually join
the inventories.
"""
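
# A minimal usage sketch, assuming two already-open branches (the names below
# are illustrative, not part of this module): fetching is normally driven
# through the public Branch/Repository APIs, which delegate to the fetcher
# classes defined here via InterRepository.fetch().
#
#   from bzrlib.branch import Branch
#   source = Branch.open('path/to/source')
#   target = Branch.open('path/to/target')
#   target.repository.fetch(source.repository, revision_id=None)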
import operator

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bzrlib import graph as _mod_graph
from bzrlib import static_tuple, tsort, versionedfile
""")
import bzrlib
from bzrlib import ui
import bzrlib.errors as errors
from bzrlib.errors import InstallFailed
from bzrlib.revision import NULL_REVISION
from bzrlib.trace import mutter
from bzrlib.progress import ProgressPhase
from bzrlib.symbol_versioning import (deprecated_function,
        deprecated_method, zero_eight)

# TODO: Avoid repeatedly opening weaves so many times.

# XXX: This doesn't handle ghost (not present in branch) revisions at
# all yet.  I'm not sure they really should be supported.

# NOTE: This doesn't copy revisions which may be present but not
# merged into the last revision.  I'm not sure we want to do that.

# - get a list of revisions that need to be pulled in
# - for each one, pull in that revision file
#   and get the inventory, and store the inventory with right parents
# - and get the ancestry, and store that with right parents too
# - and keep a note of all file ids and versions seen
# - then go through all files; for each one get the weave,
#   and add in all file versions
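
# In the classes below these steps map roughly onto RepoFetcher._revids_to_fetch
# (choosing the revisions), _fetch_weave_texts (file texts),
# _fetch_inventory_weave (inventories) and _fetch_revision_texts (revision
# objects and signatures).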

@deprecated_function(zero_eight)
def greedy_fetch(to_branch, from_branch, revision=None, pb=None):
    """Legacy API, please see branch.fetch(from_branch, last_revision, pb)."""
    f = Fetcher(to_branch, from_branch, revision, pb)
    return f.count_copied, f.failed_revisions

class RepoFetcher(object):
    """Pull revisions and texts from one repository to another.

    last_revision
        if set, try to limit to the data this revision references.

    after running:
    count_copied -- number of revisions copied

    This should not be used directly; it's essentially an object to
    encapsulate the logic in InterRepository.fetch().
    """

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None, find_ghosts=True, fetch_spec=None):
        """Create a repo fetcher.

        :param last_revision: If set, try to limit to the data this revision
            references.
        :param find_ghosts: If True search the entire history for ghosts.
        """
        # result variables.
        self.failed_revisions = []
        self.count_copied = 0
        if (to_repository.control_files._transport.base ==
                from_repository.control_files._transport.base):
            # check that last_revision is in 'from' and then return a no-operation.
            if last_revision not in (None, NULL_REVISION):
                from_repository.get_revision(last_revision)
            return
        # repository.fetch has the responsibility for short-circuiting
        # attempts to copy between a repository and itself.
        self.to_repository = to_repository
        self.from_repository = from_repository
        self.sink = to_repository._get_sink()
        # must not mutate self._last_revision as it's potentially a shared instance
        self._last_revision = last_revision
        self._fetch_spec = fetch_spec
        self.find_ghosts = find_ghosts
        if pb is None:
            self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
            self.nested_pb = self.pb
        else:
            self.pb = pb
            self.nested_pb = None
        self.from_repository.lock_read()
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.from_repository, self.from_repository._format,
               self.to_repository, self.to_repository._format)
        self.to_repository.lock_write()
        try:
            self.__fetch()
        finally:
            if self.nested_pb is not None:
                self.nested_pb.finished()
            self.to_repository.unlock()
            self.from_repository.unlock()
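
    # Construction sketch (hypothetical repository objects, for illustration
    # only); all the fetch work happens inside __init__, and the results are
    # left on the instance:
    #
    #   fetcher = RepoFetcher(to_repository=target_repo,
    #                         from_repository=source_repo,
    #                         last_revision=revision_id)
    #   copied = fetcher.count_copied
    #   failed = fetcher.failed_revisions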

    def __fetch(self):
        """Primary worker function.

        This initialises all the needed variables, and then fetches the
        requested revisions, finally clearing the progress bar.
        """
        self.to_weaves = self.to_repository.weave_store
        self.to_control = self.to_repository.control_weaves
        self.from_weaves = self.from_repository.weave_store
        self.from_control = self.from_repository.control_weaves
        # Roughly this is what we're aiming for fetch to become:
        #
        # missing = self.sink.insert_stream(self.source.get_stream(search))
        # if missing:
        #     missing = self.sink.insert_stream(self.source.get_items(missing))
        self.count_total = 0
        self.file_ids_names = {}
        # Older, weave/knit-based code path:
        pp = ProgressPhase('Fetch phase', 4, self.pb)
        revs = self._revids_to_fetch()
        if revs:
            self._fetch_weave_texts(revs)
            self._fetch_inventory_weave(revs)
            self._fetch_revision_texts(revs)
            self.count_copied += len(revs)
        # Newer, stream-based code path:
        pb = ui.ui_factory.nested_progress_bar()
        pb.show_pct = pb.show_count = False
        try:
            pb.update("Finding revisions", 0, 2)
            search = self._revids_to_fetch()
            pb.update("Fetching revisions", 1, 2)
            self._fetch_everything_for_search(search)
        finally:
            pb.finished()

    def _fetch_everything_for_search(self, search):
        """Fetch all data for the given set of revisions."""
        # The first phase is "file".  We pass the progress bar for it directly
        # into item_keys_introduced_by, which has more information about how
        # that phase is progressing than we do.  Progress updates for the other
        # phases are taken care of in this function.
        # XXX: there should be a clear owner of the progress reporting.  Perhaps
        # item_keys_introduced_by should have a richer API than it does at the
        # moment, so that it can feed the progress information back to this
        # function?
        if (self.from_repository._format.rich_root_data and
            not self.to_repository._format.rich_root_data):
            raise errors.IncompatibleRepositories(
                self.from_repository, self.to_repository,
                "different rich-root support")
        pb = ui.ui_factory.nested_progress_bar()
        try:
            pb.update("Get stream source")
            source = self.from_repository._get_source(
                self.to_repository._format)
            stream = source.get_stream(search)
            from_format = self.from_repository._format
            pb.update("Inserting stream")
            resume_tokens, missing_keys = self.sink.insert_stream(
                stream, from_format, [])
            if self.to_repository._fallback_repositories:
                missing_keys.update(
                    self._parent_inventories(search.get_keys()))
            if missing_keys:
                pb.update("Missing keys")
                stream = source.get_stream_for_missing_keys(missing_keys)
                pb.update("Inserting missing keys")
                resume_tokens, missing_keys = self.sink.insert_stream(
                    stream, from_format, resume_tokens)
            if missing_keys:
                raise AssertionError(
                    "second push failed to complete a fetch %r." % (
                        missing_keys,))
            if resume_tokens:
                raise AssertionError(
                    "second push failed to commit the fetch %r." % (
                        resume_tokens,))
            pb.update("Finishing stream")
            self.sink.finished()
        finally:
            pb.finished()
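
    # The stream/sink round trip above, in outline (a summary of the code
    # path, not additional behaviour):
    #
    #   stream = source.get_stream(search)                 # records to copy
    #   _, missing = sink.insert_stream(stream, format, [])
    #   if missing:  # e.g. parent inventories when stacking
    #       stream = source.get_stream_for_missing_keys(missing)
    #       sink.insert_stream(stream, format, resume_tokens)
    #   sink.finished()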

    def _revids_to_fetch(self):
        """Determines the exact revisions needed from self.from_repository to
        install self._last_revision in self.to_repository.

        If no revisions need to be fetched, then this just returns None.
        """
        if self._fetch_spec is not None:
            return self._fetch_spec
        mutter('fetch up to rev {%s}', self._last_revision)
        if self._last_revision is NULL_REVISION:
            # explicit limit of no revisions needed
            return None
        if (self._last_revision is not None and
            self.to_repository.has_revision(self._last_revision)):
            return None
        try:
            return self.to_repository.missing_revision_ids(self.from_repository,
                                                           self._last_revision)
        except errors.NoSuchRevision:
            raise InstallFailed([self._last_revision])

    def _fetch_weave_texts(self, revs):
        texts_pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            # fileids_altered_by_revision_ids requires reading the inventory
            # weave; we will need to read the inventory weave again when
            # all this is done, so enable caching for that specific weave.
            inv_w = self.from_repository.get_inventory_weave()
            inv_w.enable_cache()
            # file ids that changed
            file_ids = self.from_repository.fileids_altered_by_revision_ids(revs)
            count = 0
            num_file_ids = len(file_ids)
            for file_id, required_versions in file_ids.items():
                texts_pb.update("fetch texts", count, num_file_ids)
                count += 1
                to_weave = self.to_weaves.get_weave_or_empty(file_id,
                    self.to_repository.get_transaction())
                from_weave = self.from_weaves.get_weave(file_id,
                    self.from_repository.get_transaction())
                # we fetch all the texts, because texts do
                # not reference anything, and it's cheap enough
                to_weave.join(from_weave, version_ids=required_versions)
                # we don't need *all* of this data anymore, but we don't know
                # what we do need.  This cache clearing will result in a new
                # read of the knit data when we do the checkout, but probably
                # we want to emit the needed data on the fly rather than at
                # the end anyhow.
                # the from weave should know not to cache data being joined,
                # but it's ok to ask it to clear.
                from_weave.clear_cache()
                to_weave.clear_cache()
        finally:
            texts_pb.finished()

    def _fetch_inventory_weave(self, revs):
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            pb.update("fetch inventory", 0, 2)
            to_weave = self.to_control.get_weave('inventory',
                    self.to_repository.get_transaction())
            child_pb = bzrlib.ui.ui_factory.nested_progress_bar()
            try:
                # just merge; this is optimisable, and it means we don't
                # copy unreferenced data such as not-needed inventories.
                pb.update("fetch inventory", 1, 3)
                from_weave = self.from_repository.get_inventory_weave()
                pb.update("fetch inventory", 2, 3)
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                to_weave.join(from_weave, pb=child_pb, msg='merge inventory',
                              version_ids=revs)
                from_weave.clear_cache()
            finally:
                child_pb.finished()
        finally:
            pb.finished()

class GenericRepoFetcher(RepoFetcher):
    """This is a generic repo to repo fetcher.

    This makes minimal assumptions about repo layout and contents.
    It triggers a reconciliation after fetching to ensure integrity.
    """

    def _fetch_revision_texts(self, revs):
        """Fetch revision object texts."""
        rev_pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            to_txn = self.to_transaction = self.to_repository.get_transaction()
            count = 0
            total = len(revs)
            to_store = self.to_repository._revision_store
            for rev in revs:
                pb = bzrlib.ui.ui_factory.nested_progress_bar()
                try:
                    pb.update('copying revisions', count, total)
                    try:
                        sig_text = self.from_repository.get_signature_text(rev)
                        to_store.add_revision_signature_text(rev, sig_text, to_txn)
                    except errors.NoSuchRevision:
                        # not signed.
                        pass
                    to_store.add_revision(self.from_repository.get_revision(rev),
                                          to_txn)
                    count += 1
                finally:
                    pb.finished()
            # fixup inventory if needed:
            # this is expensive because we have no inverse index to current
            # ghosts, but on local disk it's a few seconds and sftp push is
            # already insane, so we just do it.
            # FIXME: repository should inform if this is needed.
            self.to_repository.reconcile()
        finally:
            rev_pb.finished()

class KnitRepoFetcher(RepoFetcher):
    """This is a knit format repository specific fetcher.

    This differs from the GenericRepoFetcher by not doing a
    reconciliation after copying, and using knit joining to
    copy revision texts.
    """

    def _fetch_revision_texts(self, revs):
        # may need to be an InterRevisionStore call here.
        from_transaction = self.from_repository.get_transaction()
        to_transaction = self.to_repository.get_transaction()
        to_sf = self.to_repository._revision_store.get_signature_file(
            to_transaction)
        from_sf = self.from_repository._revision_store.get_signature_file(
            from_transaction)
        to_sf.join(from_sf, version_ids=revs, ignore_missing=True)
        to_rf = self.to_repository._revision_store.get_revision_file(
            to_transaction)
        from_rf = self.from_repository._revision_store.get_revision_file(
            from_transaction)
        to_rf.join(from_rf, version_ids=revs)
        # Newer replacement for the tail of _revids_to_fetch: ask the target
        # repository to search for the missing revisions directly.
        return self.to_repository.search_missing_revision_ids(
            self.from_repository, self._last_revision,
            find_ghosts=self.find_ghosts)

    def _parent_inventories(self, revision_ids):
        # Find all the parent revisions referenced by the stream, but
        # not present in the stream, and make sure we send their
        # inventories.
        parent_maps = self.to_repository.get_parent_map(revision_ids)
        parents = set()
        map(parents.update, parent_maps.itervalues())
        parents.discard(NULL_REVISION)
        parents.difference_update(revision_ids)
        missing_keys = set(('inventories', rev_id) for rev_id in parents)
        return missing_keys

class Inter1and2Helper(object):
    """Helper for operations that convert data from model 1 and 2.

    This is for use by fetchers and converters.
    """

    def __init__(self, source, target):
        """:param source: The repository data comes from
        :param target: The repository data goes to
        """
        self.source = source
        self.target = target

    def iter_rev_trees(self, revs):
        """Iterate through RevisionTrees efficiently, in batches of 100."""
        while revs:
            for tree in self.source.revision_trees(revs[:100]):
                yield tree
            revs = revs[100:]
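
    # Typical construction (see Model1toKnit2Fetcher / Knit1to2Fetcher below):
    #
    #   helper = Inter1and2Helper(from_repository, to_repository)
    #   helper.generate_root_texts(revs)
    #   helper.regenerate_inventory(revs)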

    def _find_root_ids(self, revs, parent_map, graph):
        revision_root = {}
        for tree in self.iter_rev_trees(revs):
            revision_id = tree.inventory.root.revision
            root_id = tree.get_root_id()
            revision_root[revision_id] = root_id
        # Find out which parents we don't already know root ids for
        parents = set()
        for revision_parents in parent_map.itervalues():
            parents.update(revision_parents)
        parents.difference_update(revision_root.keys() + [NULL_REVISION])
        # Limit to revisions present in the versionedfile
        parents = graph.get_parent_map(parents).keys()
        for tree in self.iter_rev_trees(parents):
            root_id = tree.get_root_id()
            revision_root[tree.get_revision_id()] = root_id
        return revision_root

    def generate_root_texts(self, revs):
        """Generate VersionedFiles for all root ids.

        :param revs: the revisions to include
        """
        inventory_weave = self.source.get_inventory_weave()
        parent_texts = {}
        versionedfile = {}
        to_store = self.target.weave_store
        for tree in self.iter_rev_trees(revs):
            revision_id = tree.inventory.root.revision
            root_id = tree.inventory.root.file_id
            parents = inventory_weave.get_parents(revision_id)
            if root_id not in versionedfile:
                versionedfile[root_id] = to_store.get_weave_or_empty(root_id,
                    self.target.get_transaction())
            parent_texts[root_id] = versionedfile[root_id].add_lines(
                revision_id, parents, [], parent_texts)

    def regenerate_inventory(self, revs):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it), and
        stored in the target (reserializing it in a different format).
        :param revs: The revisions to include
        """
        inventory_weave = self.source.get_inventory_weave()
        for tree in self.iter_rev_trees(revs):
            parents = inventory_weave.get_parents(tree.get_revision_id())
            self.target.add_inventory(tree.get_revision_id(), tree.inventory,
                                      parents)

class Model1toKnit2Fetcher(GenericRepoFetcher):
    """Fetch from a Model1 repository into a Knit2 repository
    """

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        GenericRepoFetcher.__init__(self, to_repository, from_repository,
                                    last_revision, pb)

    def _fetch_weave_texts(self, revs):
        GenericRepoFetcher._fetch_weave_texts(self, revs)
        # Now generate a weave for the tree root
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs):
        self.helper.regenerate_inventory(revs)

class Knit1to2Fetcher(KnitRepoFetcher):
    """Fetch from a Knit1 repository into a Knit2 repository"""

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        KnitRepoFetcher.__init__(self, to_repository, from_repository,
                                 last_revision, pb)

    def _fetch_weave_texts(self, revs):
        KnitRepoFetcher._fetch_weave_texts(self, revs)
        # Now generate a weave for the tree root
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs):
        self.helper.regenerate_inventory(revs)

class Fetcher(object):
    """Backwards compatibility glue for branch.fetch()."""

    @deprecated_method(zero_eight)
    def __init__(self, to_branch, from_branch, last_revision=None, pb=None):
        """Please see branch.fetch()."""
        to_branch.fetch(from_branch, last_revision, pb)

    # Newer, stream-based variant of Inter1and2Helper.generate_root_texts
    # (part of the rich-root upgrade path):
    def generate_root_texts(self, revs):
        graph = self.source.get_graph()
        parent_map = graph.get_parent_map(revs)
        rev_order = tsort.topo_sort(parent_map)
        rev_id_to_root_id = self._find_root_ids(revs, parent_map, graph)
        root_id_order = [(rev_id_to_root_id[rev_id], rev_id) for rev_id in
            rev_order]
        # Guaranteed stable, this groups all the file id operations together
        # retaining topological order within the revisions of a file id.
        # File id splits and joins would invalidate this, but they don't exist
        # yet, and are unlikely to in non-rich-root environments anyway.
        root_id_order.sort(key=operator.itemgetter(0))
        # Create a record stream containing the roots to create.
        if len(revs) > 100:
            # XXX: not covered by tests, should have a flag to always run
            # this. -- mbp 20100129
            graph = _get_rich_root_heads_graph(self.source, revs)
        new_roots_stream = _new_root_data_stream(
            root_id_order, rev_id_to_root_id, parent_map, self.source, graph)
        return [('texts', new_roots_stream)]

def _get_rich_root_heads_graph(source_repo, revision_ids):
    """Get a Graph object suitable for asking heads() for new rich roots."""
    st = static_tuple.StaticTuple
    revision_keys = [st(r_id).intern() for r_id in revision_ids]
    known_graph = source_repo.revisions.get_known_graph_ancestry(
                    revision_keys)
    return _mod_graph.GraphThunkIdsToKeys(known_graph)

def _new_root_data_stream(
    root_keys_to_create, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Generate a texts substream of synthesised root entries.

    Used in fetches that do rich-root upgrades.

    :param root_keys_to_create: iterable of (root_id, rev_id) pairs describing
        the root entries to create.
    :param rev_id_to_root_id_map: dict of known rev_id -> root_id mappings for
        calculating the parents.  If a parent rev_id is not found here then it
        will be recalculated.
    :param parent_map: a parent map for all the revisions in
        root_keys_to_create.
    :param graph: a graph to use instead of repo.get_graph().
    """
    for root_key in root_keys_to_create:
        root_id, rev_id = root_key
        parent_keys = _parent_keys_for_root_version(
            root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph)
        yield versionedfile.FulltextContentFactory(
            root_key, parent_keys, None, '')

def _parent_keys_for_root_version(
    root_id, rev_id, rev_id_to_root_id_map, parent_map, repo, graph=None):
    """Get the parent keys for a given root id.

    A helper function for _new_root_data_stream.
    """
    # Include direct parents of the revision, but only if they used the same
    # root_id and are heads.
    rev_parents = parent_map[rev_id]
    parent_ids = []
    for parent_id in rev_parents:
        if parent_id == NULL_REVISION:
            continue
        if parent_id not in rev_id_to_root_id_map:
            # We probably didn't read this revision, go spend the extra effort
            # to actually check
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # Ghost, fill out rev_id_to_root_id in case we encounter this
                # again.
                # But set parent_root_id to None since we don't really know
                parent_root_id = None
            else:
                parent_root_id = tree.get_root_id()
            rev_id_to_root_id_map[parent_id] = None
            # XXX: why not:
            #   rev_id_to_root_id_map[parent_id] = parent_root_id
            # memory consumption maybe?
        else:
            parent_root_id = rev_id_to_root_id_map[parent_id]
        if root_id == parent_root_id:
            # With stacking we _might_ want to refer to a non-local revision,
            # but this code path only applies when we have the full content
            # available, so ghosts really are ghosts, not just the edge of
            # local data.
            parent_ids.append(parent_id)
        else:
            # root_id may be in the parent anyway.
            try:
                tree = repo.revision_tree(parent_id)
            except errors.NoSuchRevision:
                # ghost, can't refer to it.
                pass
            else:
                try:
                    parent_ids.append(tree.inventory[root_id].revision)
                except errors.NoSuchId:
                    # not in the tree
                    pass
    # Drop non-head parents
    if graph is None:
        graph = repo.get_graph()
    heads = graph.heads(parent_ids)
    selected_ids = []
    for parent_id in parent_ids:
        if parent_id in heads and parent_id not in selected_ids:
            selected_ids.append(parent_id)
    parent_keys = [(root_id, parent_id) for parent_id in selected_ids]
    return parent_keys
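
# Illustrative example with hypothetical ids (not from the bzr test suite):
# for a revision 'rev-2' whose only parent 'rev-1' used the same root id
# 'TREE_ROOT', the call
#
#   _parent_keys_for_root_version('TREE_ROOT', 'rev-2',
#       {'rev-1': 'TREE_ROOT'}, {'rev-2': ('rev-1',), 'rev-1': ()}, repo)
#
# returns [('TREE_ROOT', 'rev-1')], i.e. the synthesised root text for rev-2
# names rev-1's root text as its parent.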