# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""bzr upgrade logic."""

from bzrlib.bzrdir import BzrDir, format_registry
import bzrlib.errors as errors
from bzrlib.remote import RemoteBzrDir
import bzrlib.ui as ui
"""Experiment in converting existing bzr branches to weaves."""
21
# To make this properly useful
23
# 1. assign text version ids, and put those text versions into
24
# the inventory as they're converted.
26
# 2. keep track of the previous version of each file, rather than
27
# just using the last one imported
29
# 3. assign entry versions when files are added, renamed or moved.
31
# 4. when merged-in versions are observed, walk down through them
32
# to discover everything, then commit bottom-up
34
# 5. track ancestry as things are merged in, and commit that in each
37
# Perhaps it's best to first walk the whole graph and make a plan for
38
# what should be imported in what order? Need a kind of topological
39
# sort of all revisions. (Or do we, can we just before doing a revision
40
# see that all its parents have either been converted or abandoned?)
43
# Cannot import a revision until all its parents have been
44
# imported. in other words, we can only import revisions whose
45
# parents have all been imported. the first step must be to
46
# import a revision with no parents, of which there must be at
47
# least one. (So perhaps it's useful to store forward pointers
48
# from a list of parents to their children?)
50
# Another (equivalent?) approach is to build up the ordered
51
# ancestry list for the last revision, and walk through that. We
52
# are going to need that.
54
# We don't want to have to recurse all the way back down the list.
56
# Suppose we keep a queue of the revisions able to be processed at
57
# any point. This starts out with all the revisions having no
60
# This seems like a generally useful algorithm...
62
# The current algorithm is dumb (O(n**2)?) but will do the job, and
63
# takes less than a second on the bzr.dev branch.
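
# Illustration only (not part of the converter): a minimal, self-contained
# sketch of the parents-first ordering described above, applied to a toy
# parent map.  The names order_parents_first and toy_parent_map are
# hypothetical; the real implementation for this module is _make_order()
# further down.  Assumes an acyclic graph whose parents are all present.
def order_parents_first(parent_map):
    """Return revision ids such that every revision follows all its parents."""
    done = set()
    order = []
    todo = set(parent_map)
    while todo:
        for rev_id in sorted(todo):
            if set(parent_map[rev_id]).issubset(done):
                # every parent has already been emitted (or there were none)
                order.append(rev_id)
                done.add(rev_id)
                todo.remove(rev_id)
                break
    return order

# toy_parent_map = {'rev-a': [], 'rev-b': ['rev-a'], 'rev-c': ['rev-a', 'rev-b']}
# order_parents_first(toy_parent_map) => ['rev-a', 'rev-b', 'rev-c']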

# This currently does a kind of lazy conversion of file texts, where a
# new text is written in every version.  That's unnecessary but for
# the moment saves us having to worry about when files need new
# versions.
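
# For orientation, a hedged sketch of the weave calls this module relies on;
# the call shapes are taken from the code below, but treat the exact Weave
# API of this bzrlib era as an assumption rather than a reference:
#
#   w = Weave(file_id)                                   # one weave per file
#   w.add(rev_id, file_parents, file_lines, text_sha1)   # one entry per text version
#
# The laziness mentioned above is that an entry can end up recorded for every
# revision; reusing the parent's text version when the content is unchanged
# (as _convert_file_version() does for single-parent revisions) avoids that.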

import hotshot, hotshot.stats
import sys
import tempfile

from bzrlib.branch import Branch, find_branch
from bzrlib.revfile import Revfile
from bzrlib.weave import Weave
from bzrlib.weavefile import read_weave, write_weave
from bzrlib.progress import ProgressBar
from bzrlib.atomicfile import AtomicFile
from bzrlib.xml4 import serializer_v4
from bzrlib.xml5 import serializer_v5
from bzrlib.trace import mutter, note, warning, enable_default_logging
from bzrlib.osutils import sha_strings, sha_string
from bzrlib.commit import merge_ancestry_lines


class Convert(object):

    def __init__(self, url, format=None):
        self.format = format
        self.bzrdir = BzrDir.open_unsupported(url)
        # XXX: Change to cleanup
        warning_id = 'cross_format_fetch'
        saved_warning = warning_id in ui.ui_factory.suppressed_warnings
        if isinstance(self.bzrdir, RemoteBzrDir):
            self.bzrdir._ensure_real()
            self.bzrdir = self.bzrdir._real_bzrdir
        if self.bzrdir.root_transport.is_readonly():
            raise errors.UpgradeReadonly
        self.transport = self.bzrdir.root_transport
        # state used by the weave-conversion helpers below
        self.converted_revs = set()
        self.absent_revisions = set()
        self.revisions = {}
        self.text_count = 0
        self.inventories = {}
        ui.ui_factory.suppressed_warnings.add(warning_id)
        try:
            self.convert()
        finally:
            if not saved_warning:
                ui.ui_factory.suppressed_warnings.remove(warning_id)

    def convert(self):
        try:
            branch = self.bzrdir.open_branch()
            if branch.user_url != self.bzrdir.user_url:
                ui.ui_factory.note("This is a checkout. The branch (%s) needs to be "
                                   "upgraded separately." % branch.user_url)
        except (errors.NotBranchError, errors.IncompatibleRepositories):
            # might not be a format we can open without upgrading; see e.g.
            # https://bugs.launchpad.net/bzr/+bug/253891
            pass
        if self.format is None:
            try:
                rich_root = self.bzrdir.find_repository()._format.rich_root_data
            except errors.NoRepositoryPresent:
                rich_root = False  # assume no rich roots
            if rich_root:
                format_name = "default-rich-root"
            else:
                format_name = "default"
            format = format_registry.make_bzrdir(format_name)
        else:
            format = self.format
        if not self.bzrdir.needs_format_conversion(format):
            raise errors.UpToDateFormat(self.bzrdir._format)
        if not self.bzrdir.can_convert_format():
            raise errors.BzrError("cannot upgrade from bzrdir format %s" %
                                  self.bzrdir._format)
        self.bzrdir.check_conversion_target(format)
        ui.ui_factory.note('starting upgrade of %s' % self.transport.base)
        self.bzrdir.backup_bzrdir()
        while self.bzrdir.needs_format_conversion(format):
            converter = self.bzrdir._format.get_converter(format)
            self.bzrdir = converter.convert(self.bzrdir, None)
        ui.ui_factory.note("finished")


def upgrade(url, format=None):
    """Upgrade to format, or the default bzrdir format if not supplied."""
    Convert(url, format)

        # weave-conversion pass: load all revisions, order them, convert each
        enable_default_logging()
        self.pb = ProgressBar()
        self.inv_weave = Weave('__inventory')
        self.anc_weave = Weave('__ancestry')
        self.ancestries = {}
        # holds in-memory weaves for all files
        self.text_weaves = {}
        self.branch = Branch('.', relax_version_check=True)
        rev_history = self.branch.revision_history()
        # to_read is a stack holding the revisions we still need to process;
        # appending to it adds new highest-priority revisions
        self.known_revisions = set(rev_history)
        self.to_read = [rev_history[-1]]
        while self.to_read:
            rev_id = self.to_read.pop()
            if (rev_id not in self.revisions
                    and rev_id not in self.absent_revisions):
                self._load_one_rev(rev_id)
        to_import = self._make_order()
        for i, rev_id in enumerate(to_import):
            self.pb.update('converting revision', i, len(to_import))
            self._convert_one_rev(rev_id)
        print 'upgraded to weaves:'
        print ' %6d revisions and inventories' % len(self.revisions)
        print ' %6d absent revisions removed' % len(self.absent_revisions)
        print ' %6d texts' % self.text_count
        self._write_all_weaves()
        self._write_all_revs()

    def _write_all_weaves(self):
        write_a_weave(self.inv_weave, 'weaves/inventory.weave')
        write_a_weave(self.anc_weave, 'weaves/ancestry.weave')
        for i, (file_id, file_weave) in enumerate(self.text_weaves.items()):
            self.pb.update('writing weave', i, len(self.text_weaves))
            write_a_weave(file_weave, 'weaves/%s.weave' % file_id)

    def _write_all_revs(self):
        """Write all revisions out in new form."""
        for i, rev_id in enumerate(self.converted_revs):
            self.pb.update('write revision', i, len(self.converted_revs))
            f = file('new-revisions/%s' % rev_id, 'wb')
            try:
                serializer_v5.write_revision(self.revisions[rev_id], f)
            finally:
                f.close()

    def _load_one_rev(self, rev_id):
        """Load a revision object into memory.

        Any parents not either loaded or abandoned get queued to be
        loaded."""
        self.pb.update('loading revision',
                       len(self.revisions),
                       len(self.known_revisions))
        if rev_id not in self.branch.revision_store:
            note('revision {%s} not present in branch; '
                 'will not be converted',
                 rev_id)
            self.absent_revisions.add(rev_id)
        else:
            rev_xml = self.branch.revision_store[rev_id].read()
            rev = serializer_v4.read_revision_from_string(rev_xml)
            for parent_id in rev.parent_ids:
                self.known_revisions.add(parent_id)
                self.to_read.append(parent_id)
            self.revisions[rev_id] = rev
            old_inv_xml = self.branch.inventory_store[rev_id].read()
            inv = serializer_v4.read_inventory_from_string(old_inv_xml)
            assert rev.inventory_sha1 == sha_string(old_inv_xml)
            self.inventories[rev_id] = inv

    def _convert_one_rev(self, rev_id):
        """Convert revision and all referenced objects to new format."""
        rev = self.revisions[rev_id]
        inv = self.inventories[rev_id]
        for parent_id in rev.parent_ids[:]:
            if parent_id in self.absent_revisions:
                rev.parent_ids.remove(parent_id)
                note('remove {%s} as parent of {%s}', parent_id, rev_id)
        self._convert_revision_contents(rev, inv)
        # the XML is now updated with text versions
        new_inv_xml = serializer_v5.write_inventory_to_string(inv)
        new_inv_sha1 = sha_string(new_inv_xml)
        self.inv_weave.add(rev_id, rev.parent_ids,
                           new_inv_xml.splitlines(True),
                           new_inv_sha1)
        # TODO: Upgrade revision XML and write that out
        rev.inventory_sha1 = new_inv_sha1
        self._make_rev_ancestry(rev)
        self.converted_revs.add(rev_id)

    def _make_rev_ancestry(self, rev):
        rev_id = rev.revision_id
        for parent_id in rev.parent_ids:
            assert parent_id in self.converted_revs
        lines = list(self.anc_weave.mash_iter(rev.parent_ids))
        lines.append(rev_id + '\n')
        parent_ancestries = [self.ancestries[p] for p in rev.parent_ids]
        new_lines = merge_ancestry_lines(rev_id, parent_ancestries)
        assert set(lines) == set(new_lines)
        self.ancestries[rev_id] = new_lines
        self.anc_weave.add(rev_id, rev.parent_ids, lines)
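
    # For orientation (derived from the code above, not an authoritative
    # description): each ancestry "text" is simply one newline-terminated
    # revision id per ancestor, e.g.
    #
    #   ['rev-a\n', 'rev-b\n', 'rev-c\n']
    #
    # so merging the parents' ancestries and appending this revision's own id
    # yields the full ancestry recorded for rev_id.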

    def _convert_revision_contents(self, rev, inv):
        """Convert all the files within a revision.

        Also upgrade the inventory to refer to the text revision ids."""
        rev_id = rev.revision_id
        mutter('converting texts of revision {%s}',
               rev_id)
        for file_id in inv:
            ie = inv[file_id]
            self._set_name_version(rev, ie)
            if ie.kind != 'file':
                continue
            self._convert_file_version(rev, ie)

    def _set_name_version(self, rev, ie):
        """Set name version for a file.

        Done in a slightly lazy way: if the file is renamed or in a merge revision
        it gets a new version, otherwise the same as before.
        """
        file_id = ie.file_id
        if len(rev.parent_ids) != 1:
            ie.name_version = rev.revision_id
        else:
            old_inv = self.inventories[rev.parent_ids[0]]
            if not old_inv.has_id(file_id):
                ie.name_version = rev.revision_id
            else:
                old_ie = old_inv[file_id]
                if (old_ie.parent_id != ie.parent_id
                        or old_ie.name != ie.name):
                    ie.name_version = rev.revision_id
                else:
                    ie.name_version = old_ie.name_version

    def _convert_file_version(self, rev, ie):
        """Convert one version of one file.

        The file needs to be added into the weave if it is a merge
        of >=2 parents or if it's changed from its parent.
        """
        file_id = ie.file_id
        rev_id = rev.revision_id
        w = self.text_weaves.get(file_id)
        if w is None:
            w = Weave(file_id)
            self.text_weaves[file_id] = w
        file_lines = self.branch.text_store[ie.text_id].readlines()
        assert sha_strings(file_lines) == ie.text_sha1
        assert sum(map(len, file_lines)) == ie.text_size
        file_parents = []
        text_changed = False
        for parent_id in rev.parent_ids:
            ##if parent_id in self.absent_revisions:
            assert parent_id in self.converted_revs, \
                'parent {%s} not converted' % parent_id
            parent_inv = self.inventories[parent_id]
            if parent_inv.has_id(file_id):
                parent_ie = parent_inv[file_id]
                old_text_version = parent_ie.text_version
                assert old_text_version in self.converted_revs
                if old_text_version not in file_parents:
                    file_parents.append(old_text_version)
                if parent_ie.text_sha1 != ie.text_sha1:
                    text_changed = True
        if len(file_parents) != 1 or text_changed:
            w.add(rev_id, file_parents, file_lines, ie.text_sha1)
            ie.text_version = rev_id
            self.text_count += 1
            ##mutter('import text {%s} of {%s}',
            ##       ie.text_id, file_id)
        else:
            ##mutter('text of {%s} unchanged from parent', file_id)
            ie.text_version = file_parents[0]

    def _make_order(self):
        """Return a suitable order for importing revisions.

        The order must be such that a revision is imported after all
        its (present) parents.
        """
        todo = set(self.revisions.keys())
        done = self.absent_revisions.copy()
        order = []
        while todo:
            # scan through looking for a revision whose parents
            # are all done
            for rev_id in sorted(list(todo)):
                rev = self.revisions[rev_id]
                parent_ids = set(rev.parent_ids)
                if parent_ids.issubset(done):
                    # can take this one now
                    order.append(rev_id)
                    todo.remove(rev_id)
                    done.add(rev_id)
        return order


def write_a_weave(weave, filename):
    inv_wf = file(filename, 'wb')
    try:
        write_weave(weave, inv_wf)
    finally:
        inv_wf.close()


def profile_convert():
    prof_f = tempfile.NamedTemporaryFile()

    prof = hotshot.Profile(prof_f.name)

    prof.runcall(Convert)
    prof.close()

    stats = hotshot.stats.load(prof_f.name)
    stats.sort_stats('time')
    # XXX: Might like to write to stderr or the trace file instead but
    # print_stats seems hardcoded to stdout
    stats.print_stats(100)


if __name__ == '__main__':
    enable_default_logging()

    if '-p' in sys.argv[1:]: