that has merged into it.  As the first step of a merge, pull, or
branch operation we copy history from the source into the destination
branch.

The copying is done in a slightly complicated order.  We don't want to
add a revision to the store until everything it refers to is also
stored, so that if a revision is present we can totally recreate it.
However, we can't know what files are included in a revision until we
read its inventory.  So we query the inventory store of the source for
the ids we need, and then pull those ids and finally actually join
the inventories.
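
# In outline (an illustrative summary of the phases driven below, not extra
# behaviour): file texts are copied first, then inventories, and only then
# the revision objects themselves, so the target never holds a revision
# whose referenced data is missing.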
import operator

import bzrlib.errors as errors
import bzrlib.ui
from bzrlib import symbol_versioning
from bzrlib.errors import (InstallFailed,
    )
from bzrlib.progress import ProgressPhase
from bzrlib.revision import NULL_REVISION
from bzrlib.symbol_versioning import (deprecated_function,
    deprecated_method,
    zero_eight,
    )
from bzrlib.tsort import topo_sort
from bzrlib.trace import mutter
from bzrlib.lazy_import import lazy_import
# TODO: Avoid repeatedly opening weaves so many times.

# XXX: This doesn't handle ghost (not present in branch) revisions at
# all yet.  I'm not sure they really should be supported.

# NOTE: This doesn't copy revisions which may be present but not
# merged into the last revision.  I'm not sure we want to do that.

# - get a list of revisions that need to be pulled in
# - for each one, pull in that revision file
#   and get the inventory, and store the inventory with right
#   parents.
# - and get the ancestry, and store that with right parents too
# - and keep a note of all file ids and versions seen
# - then go through all files; for each one get the weave,
#   and add in all file versions


@deprecated_function(zero_eight)
def greedy_fetch(to_branch, from_branch, revision=None, pb=None):
    """Legacy API, please see branch.fetch(from_branch, last_revision, pb)."""
    f = Fetcher(to_branch, from_branch, revision, pb)
    return f.count_copied, f.failed_revisions
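
# Illustrative use of the deprecated entry point (a sketch; rev_id is a
# caller-supplied revision id, not something defined here):
#
#   copied, failed = greedy_fetch(to_branch, from_branch, revision=rev_id)
#
# New code should call to_branch.fetch(from_branch, rev_id) instead, as the
# docstring above says.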
from bzrlib.versionedfile import FulltextContentFactory


class RepoFetcher(object):
    """Pull revisions and texts from one repository to another.

    last_revision
        if set, try to limit to the data this revision references.

    count_copied -- number of revisions copied

    This should not be used directly; it's essentially an object to
    encapsulate the logic in InterRepository.fetch().
    """
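
    # Illustrative sketch (assumed driver code, not part of this module):
    # InterRepository.fetch is expected to construct and run this object
    # roughly as
    #
    #   fetcher = RepoFetcher(to_repo, from_repo, last_revision=rev_id,
    #                         find_ghosts=True)
    #
    # and then read fetcher.count_copied for the copy statistics.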

    def __init__(self, to_repository, from_repository, last_revision=None, pb=None):
        self.failed_revisions = []
        if to_repository.control_files._transport.base == from_repository.control_files._transport.base:
            # check that last_revision is in 'from' and then return a no-operation.
            if last_revision not in (None, NULL_REVISION):
                to_repository.get_revision(last_revision)

    def __init__(self, to_repository, from_repository, last_revision=None,
        pb=None, find_ghosts=True, fetch_spec=None):
        """Create a repo fetcher.

        :param last_revision: If set, try to limit to the data this revision
            references.
        :param find_ghosts: If True search the entire history for ghosts.
        :param _write_group_acquired_callable: Don't use; this parameter only
            exists to facilitate a hack done in InterPackRepo.fetch.  We would
            like to remove this parameter.
        :param pb: ProgressBar object to use; deprecated and ignored.
            This method will just create one on top of the stack.
        """
        if pb is not None:
            symbol_versioning.warn(
                symbol_versioning.deprecated_in((1, 14, 0))
                % "pb parameter to RepoFetcher.__init__")
            # and for simplicity it is in fact ignored
        if to_repository.has_same_location(from_repository):
            # repository.fetch should be taking care of this case.
            raise errors.BzrError('RepoFetcher run '
                'between two objects at the same location: '
                '%r and %r' % (to_repository, from_repository))
        self.to_repository = to_repository
        self.from_repository = from_repository
        self.sink = to_repository._get_sink()
        # must not mutate self._last_revision as it's potentially a shared instance
        self._last_revision = last_revision
        self.pb = bzrlib.ui.ui_factory.nested_progress_bar()
        self.nested_pb = self.pb
        self.nested_pb = None
        self._fetch_spec = fetch_spec
        self.find_ghosts = find_ghosts
        self.from_repository.lock_read()
        mutter("Using fetch logic to copy between %s(%s) and %s(%s)",
               self.from_repository, self.from_repository._format,
               self.to_repository, self.to_repository._format)
        try:
            self.to_repository.lock_write()
            try:
                self.to_repository.start_write_group()
                try:
                    self.__fetch()
                except:
                    self.to_repository.abort_write_group()
                    raise
                else:
                    self.to_repository.commit_write_group()
            finally:
                if self.nested_pb is not None:
                    self.nested_pb.finished()
                self.to_repository.unlock()
        finally:
            self.from_repository.unlock()

    def __fetch(self):
        """Primary worker function.

        This initialises all the needed variables, and then fetches the
        requested revisions, finally clearing the progress bar.
        """
        self.to_weaves = self.to_repository.weave_store
        self.to_control = self.to_repository.control_weaves
        self.from_weaves = self.from_repository.weave_store
        self.from_control = self.from_repository.control_weaves
        # Roughly this is what we're aiming for fetch to become:
        #
        # missing = self.sink.insert_stream(self.source.get_stream(search))
        # if missing:
        #     missing = self.sink.insert_stream(self.source.get_items(missing))
        self.count_total = 0
        self.file_ids_names = {}
        pp = ProgressPhase('Fetch phase', 4, self.pb)
        revs = self._revids_to_fetch()
        self._fetch_weave_texts(revs)
        self._fetch_inventory_weave(revs)
        self._fetch_revision_texts(revs)
        self.count_copied += len(revs)
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        pb.show_pct = pb.show_count = False
        pb.update("Finding revisions", 0, 2)
        search = self._revids_to_fetch()
        pb.update("Fetching revisions", 1, 2)
        self._fetch_everything_for_search(search)

    def _fetch_everything_for_search(self, search):
        """Fetch all data for the given set of revisions."""
        # The first phase is "file".  We pass the progress bar for it directly
        # into item_keys_introduced_by, which has more information about how
        # that phase is progressing than we do.  Progress updates for the other
        # phases are taken care of in this function.
        # XXX: there should be a clear owner of the progress reporting.  Perhaps
        # item_keys_introduced_by should have a richer API than it does at the
        # moment, so that it can feed the progress information back to this
        # function?
        if (self.from_repository._format.rich_root_data and
            not self.to_repository._format.rich_root_data):
            raise errors.IncompatibleRepositories(
                self.from_repository, self.to_repository,
                "different rich-root support")
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        pb.update("Get stream source")
        source = self.from_repository._get_source(
            self.to_repository._format)
        stream = source.get_stream(search)
        from_format = self.from_repository._format
        pb.update("Inserting stream")
        resume_tokens, missing_keys = self.sink.insert_stream(
            stream, from_format, [])
        if self.to_repository._fallback_repositories:
            missing_keys.update(
                self._parent_inventories(search.get_keys()))
        if missing_keys:
            pb.update("Missing keys")
            stream = source.get_stream_for_missing_keys(missing_keys)
            pb.update("Inserting missing keys")
            resume_tokens, missing_keys = self.sink.insert_stream(
                stream, from_format, resume_tokens)
        if missing_keys:
            raise AssertionError(
                "second push failed to complete a fetch %r." % (
                    missing_keys,))
        if resume_tokens:
            raise AssertionError(
                "second push failed to commit the fetch %r." % (
                    resume_tokens,))
        pb.update("Finishing stream")

    def _revids_to_fetch(self):
        """Determines the exact revisions needed from self.from_repository to
        install self._last_revision in self.to_repository.

        If no revisions need to be fetched, then this just returns None.
        """
        if self._fetch_spec is not None:
            return self._fetch_spec
        mutter('fetch up to rev {%s}', self._last_revision)
        if self._last_revision is NULL_REVISION:
            # explicit limit of no revisions needed
            return None
        if (self._last_revision is not None and
            self.to_repository.has_revision(self._last_revision)):
            return None
        try:
            return self.to_repository.missing_revision_ids(self.from_repository,
                                                           self._last_revision)
        except errors.NoSuchRevision:
            raise InstallFailed([self._last_revision])

    def _fetch_weave_texts(self, revs):
        texts_pb = bzrlib.ui.ui_factory.nested_progress_bar()
        # fileids_altered_by_revision_ids requires reading the inventory
        # weave, we will need to read the inventory weave again when
        # all this is done, so enable caching for that specific weave
        inv_w = self.from_repository.get_inventory_weave()
        inv_w.enable_cache()
        file_ids = self.from_repository.fileids_altered_by_revision_ids(revs)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, required_versions in file_ids.items():
            texts_pb.update("fetch texts", count, num_file_ids)
            count += 1
            to_weave = self.to_weaves.get_weave_or_empty(file_id,
                self.to_repository.get_transaction())
            from_weave = self.from_weaves.get_weave(file_id,
                self.from_repository.get_transaction())
            # we fetch all the texts, because texts do
            # not reference anything, and it's cheap enough
            to_weave.join(from_weave, version_ids=required_versions)
            # we don't need *all* of this data any more, but we don't know
            # which parts we do need.  This cache clearing will result in a
            # new read of the knit data when we do the checkout, but probably
            # we want to emit the needed data on the fly rather than at the
            # end anyhow.
            # the from weave should know not to cache data being joined,
            # but it's ok to ask it to clear.
            from_weave.clear_cache()
            to_weave.clear_cache()

    def _fetch_inventory_weave(self, revs):
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        pb.update("fetch inventory", 0, 2)
        to_weave = self.to_control.get_weave('inventory',
            self.to_repository.get_transaction())
        child_pb = bzrlib.ui.ui_factory.nested_progress_bar()
        # just merge, this is optimisable and it means we don't
        # copy unreferenced data such as not-needed inventories.
        pb.update("fetch inventory", 1, 3)
        from_weave = self.from_repository.get_inventory_weave()
        pb.update("fetch inventory", 2, 3)
        # we fetch only the referenced inventories because we do not
        # know for unselected inventories whether all their required
        # texts are present in the other repository - it could be
        # corrupt.
        to_weave.join(from_weave, pb=child_pb, msg='merge inventory',
                      version_ids=revs)
        from_weave.clear_cache()


class GenericRepoFetcher(RepoFetcher):
    """This is a generic repo to repo fetcher.

    This makes minimal assumptions about repo layout and contents.
    It triggers a reconciliation after fetching to ensure integrity.
    """

    def _fetch_revision_texts(self, revs):
        """Fetch revision object texts"""
        rev_pb = bzrlib.ui.ui_factory.nested_progress_bar()
        to_txn = self.to_transaction = self.to_repository.get_transaction()
        count = 0
        total = len(revs)
        to_store = self.to_repository._revision_store
        for rev in revs:
            pb = bzrlib.ui.ui_factory.nested_progress_bar()
            pb.update('copying revisions', count, total)
            try:
                sig_text = self.from_repository.get_signature_text(rev)
                to_store.add_revision_signature_text(rev, sig_text, to_txn)
            except errors.NoSuchRevision:
                # not signed.
                pass
            to_store.add_revision(self.from_repository.get_revision(rev),
                                  to_txn)
            count += 1
        # fixup inventory if needed:
        # this is expensive because we have no inverse index to current ghosts.
        # but on local disk it's a few seconds and sftp push is already insane.
        # FIXME: repository should inform if this is needed.
        self.to_repository.reconcile()


class KnitRepoFetcher(RepoFetcher):
    """This is a knit format repository specific fetcher.

    This differs from the GenericRepoFetcher by not doing a
    reconciliation after copying, and using knit joining to
    copy revision texts.
    """

    def _fetch_revision_texts(self, revs):
        # may need to be a InterRevisionStore call here.
        from_transaction = self.from_repository.get_transaction()
        to_transaction = self.to_repository.get_transaction()
        to_sf = self.to_repository._revision_store.get_signature_file(
            to_transaction)
        from_sf = self.from_repository._revision_store.get_signature_file(
            from_transaction)
        to_sf.join(from_sf, version_ids=revs, ignore_missing=True)
        to_rf = self.to_repository._revision_store.get_revision_file(
            to_transaction)
        from_rf = self.from_repository._revision_store.get_revision_file(
            from_transaction)
        to_rf.join(from_rf, version_ids=revs)
        return self.to_repository.search_missing_revision_ids(
            self.from_repository, self._last_revision,
            find_ghosts=self.find_ghosts)

    def _parent_inventories(self, revision_ids):
        # Find all the parent revisions referenced by the stream, but
        # not present in the stream, and make sure we send their
        # inventories.
        parent_maps = self.to_repository.get_parent_map(revision_ids)
        parents = set()
        map(parents.update, parent_maps.itervalues())
        parents.discard(NULL_REVISION)
        parents.difference_update(revision_ids)
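        # Illustrative example (not from the original comments): if the stream
        # carries revisions {A, B} and B's parents are (A, C), then at this
        # point parents == {C}, so only C's inventory key is requested below.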
        missing_keys = set(('inventories', rev_id) for rev_id in parents)
        return missing_keys


class Inter1and2Helper(object):
    """A helper for operations that convert data from model 1 to model 2.

    This is for use by fetchers and converters.
    """

    def __init__(self, source, target):
        """Constructor.

        :param source: The repository data comes from
        :param target: The repository data goes to
        """
        self.source = source
        self.target = target

    def iter_rev_trees(self, revs):
        """Iterate through RevisionTrees efficiently."""
        revs = revs[100:]

    def _find_root_ids(self, revs, parent_map, graph):
        revision_root = {}
        planned_versions = {}
        for tree in self.iter_rev_trees(revs):
            revision_id = tree.inventory.root.revision
            root_id = tree.get_root_id()
            planned_versions.setdefault(root_id, []).append(revision_id)
            revision_root[revision_id] = root_id
        # Find out which parents we don't already know root ids for
        parents = set()
        for revision_parents in parent_map.itervalues():
            parents.update(revision_parents)
        parents.difference_update(revision_root.keys() + [NULL_REVISION])
        # Limit to revisions present in the versionedfile
        parents = graph.get_parent_map(parents).keys()
        for tree in self.iter_rev_trees(parents):
            root_id = tree.get_root_id()
            revision_root[tree.get_revision_id()] = root_id
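        # Illustrative example (not part of the original code): for a single
        # revision R whose root directory has file id 'TREE_ROOT', this
        # returns ({R: 'TREE_ROOT'}, {'TREE_ROOT': [R]}).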
        return revision_root, planned_versions

    def generate_root_texts(self, revs):
        """Generate VersionedFiles for all root ids.

        :param revs: the revisions to include
        """
        inventory_weave = self.source.get_inventory_weave()
        parent_texts = {}
        versionedfile = {}
        to_store = self.target.weave_store
        for tree in self.iter_rev_trees(revs):
            revision_id = tree.inventory.root.revision
            root_id = tree.inventory.root.file_id
            parents = inventory_weave.get_parents(revision_id)
            if root_id not in versionedfile:
                versionedfile[root_id] = to_store.get_weave_or_empty(root_id,
                    self.target.get_transaction())
            parent_texts[root_id] = versionedfile[root_id].add_lines(
                revision_id, parents, [], parent_texts)

    def regenerate_inventory(self, revs):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it), and
        stored in the target (reserializing it in a different format).

        :param revs: The revisions to include
        """
        inventory_weave = self.source.get_inventory_weave()
        for tree in self.iter_rev_trees(revs):
            parents = inventory_weave.get_parents(tree.get_revision_id())
            self.target.add_inventory(tree.get_revision_id(), tree.inventory,
                                      parents)


class Model1toKnit2Fetcher(GenericRepoFetcher):
    """Fetch from a Model1 repository into a Knit2 repository
    """

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        GenericRepoFetcher.__init__(self, to_repository, from_repository,
                                    last_revision, pb)

    def _fetch_weave_texts(self, revs):
        GenericRepoFetcher._fetch_weave_texts(self, revs)
        # Now generate a weave for the tree root
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs):
        self.helper.regenerate_inventory(revs)


class Knit1to2Fetcher(KnitRepoFetcher):
    """Fetch from a Knit1 repository into a Knit2 repository"""

    def __init__(self, to_repository, from_repository, last_revision=None,
                 pb=None):
        self.helper = Inter1and2Helper(from_repository, to_repository)
        KnitRepoFetcher.__init__(self, to_repository, from_repository,
                                 last_revision, pb)

    def _fetch_weave_texts(self, revs):
        KnitRepoFetcher._fetch_weave_texts(self, revs)
        # Now generate a weave for the tree root
        self.helper.generate_root_texts(revs)

    def _fetch_inventory_weave(self, revs):
        self.helper.regenerate_inventory(revs)


class Fetcher(object):
    """Backwards compatibility glue for branch.fetch()."""

    @deprecated_method(zero_eight)
    def __init__(self, to_branch, from_branch, last_revision=None, pb=None):
        """Please see branch.fetch()."""
        to_branch.fetch(from_branch, last_revision, pb)
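
# Illustrative use of the compatibility shim above (deprecated since 0.8;
# rev_id is a caller-supplied revision id):
#
#   Fetcher(to_branch, from_branch, last_revision=rev_id)
#
# which simply delegates to to_branch.fetch(from_branch, rev_id).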
        graph = self.source.get_graph()
        parent_map = graph.get_parent_map(revs)
        rev_order = topo_sort(parent_map)
        rev_id_to_root_id, root_id_to_rev_ids = self._find_root_ids(
            revs, parent_map, graph)
        root_id_order = [(rev_id_to_root_id[rev_id], rev_id) for rev_id in
            rev_order]
        # Guaranteed stable, this groups all the file id operations together
        # retaining topological order within the revisions of a file id.
        # File id splits and joins would invalidate this, but they don't exist
        # yet, and are unlikely to in non-rich-root environments anyway.
        root_id_order.sort(key=operator.itemgetter(0))
        # Create a record stream containing the roots to create.
        def yield_roots():
            for key in root_id_order:
                root_id, rev_id = key
                rev_parents = parent_map[rev_id]
                # We drop revision parents with different file-ids, because
                # that represents a rename of the root to a different location
                # - it's not actually a parent for us. (We could look for that
                # file id in the revision tree at considerably more expense,
                # but for now this is sufficient (and reconcile will catch and
                # correct this anyway).
                # When a parent revision is a ghost, we guess that its root id
                # was unchanged (rather than trimming it from the parent list).
                parent_keys = tuple((root_id, parent) for parent in rev_parents
                    if parent != NULL_REVISION and
                        rev_id_to_root_id.get(parent, root_id) == root_id)
                yield FulltextContentFactory(key, parent_keys, None, '')
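        # The single 'texts' substream below therefore carries one empty
        # fulltext per (root_id, revision_id) key: enough to record the root
        # entry and its per-file graph in the target without copying any real
        # file content.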
        return [('texts', yield_roots())]