--- bzrlib/repository.py
+++ bzrlib/repository.py
@@ -1 +1 @@
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
+# Copyright (C) 2005-2010 Canonical Ltd
@@ -3 +3 @@
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -49 +53 @@
 from bzrlib.testament import Testament
@@ -52 +56 @@
-from bzrlib.decorators import needs_read_lock, needs_write_lock
+from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
 from bzrlib.inter import InterObject
 from bzrlib.inventory import (
@@ -59 +64 @@
+from bzrlib.lock import _RelockDebugMixin
 from bzrlib import registry
 from bzrlib.trace import (
     log_exception_quietly, note, mutter, mutter_callsite, warning)
@@ -205 +210 @@
             # an inventory delta was accumulated without creating a new
@@ -207 +212 @@
             basis_id = self.basis_delta_revision
-            self.inv_sha1 = self.repository.add_inventory_by_delta(
+            # We ignore the 'inventory' returned by add_inventory_by_delta
+            # because self.new_inventory is used to hint to the rest of the
+            # system what code path was taken
+            self.inv_sha1, _ = self.repository.add_inventory_by_delta(
                 basis_id, self._basis_delta, self._new_revision_id,
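
Context for the hunk above: add_inventory_by_delta returns a pair of the
inventory's sha1 validator and the resulting Inventory, so the caller now
unpacks the tuple and keeps only the sha1. A minimal sketch of the calling
convention (assumes an active write group; repo, basis_id, delta,
new_revision_id and parents come from the surrounding commit machinery):

    # Sketch: add_inventory_by_delta() hands back (sha1, inventory); keep
    # the sha1 and drop the inventory so new_inventory can stay None.
    inv_sha1, _ = repo.add_inventory_by_delta(
        basis_id, delta, new_revision_id, parents)
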
@@ -465 +473 @@
             if content_summary[2] is None:
                 raise ValueError("Files must not have executable = None")
@@ -468 +476 @@
-                if (# if the file length changed we have to store:
-                    parent_entry.text_size != content_summary[1] or
-                    # if the exec bit has changed we have to store:
+                # We can't trust a check of the file length because of content
+                # filtering...
+                if (# if the exec bit has changed we have to store:
                     parent_entry.executable != content_summary[2]):
@@ -473 +481 @@
             elif parent_entry.text_sha1 == content_summary[3]:
@@ -540 +548 @@
                 ie.revision = parent_entry.revision
                 return self._get_delta(ie, basis_inv, path), False, None
             ie.reference_revision = content_summary[3]
+            if ie.reference_revision is None:
+                raise AssertionError("invalid content_summary for nested tree: %r"
+                    % (content_summary,))
             self._add_text_to_weave(ie.file_id, '', heads, None)
@@ -545 +556 @@
             raise NotImplementedError('unknown kind')
@@ -808 +819 @@
         self.new_inventory = None
         if len(inv_delta):
+            # This should perhaps be guarded by a check that the basis we
+            # commit against is the basis for the commit and if not do a delta
+            # against the basis.
             self._any_changes = True
         if not seen_root:
             # housekeeping root entry changes do not affect no-change commits.
@@ -846 +860 @@
             # versioned roots do not change unless the tree found a change.
@@ -849 +863 @@
+class RepositoryWriteLockResult(object):
+    """The result of write locking a repository.
+
+    :ivar repository_token: The token obtained from the underlying lock, or
+        None.
+    :ivar unlock: A callable which will unlock the lock.
+    """
+
+    def __init__(self, unlock, repository_token):
+        self.repository_token = repository_token
+        self.unlock = unlock
+
 ######################################################################
@@ -853 +880 @@
-class Repository(object):
+class Repository(_RelockDebugMixin, bzrdir.ControlComponent):
     """Repository holding history for one or more branches.
 
     The repository holds and retrieves historical information including
@@ -1015 +1042 @@
         :seealso: add_inventory, for the contract.
         """
-        inv_lines = self._serialise_inventory_to_lines(inv)
+        inv_lines = self._serializer.write_inventory_to_lines(inv)
         return self._inventory_add_lines(revision_id, parents,
             inv_lines, check_content=False)
@@ -1216 +1243 @@
         for record in getattr(self, kind).check(keys=keys[kind]):
             if record.storage_kind == 'absent':
                 checker._report_items.append(
-                    'Missing inventory {%s}' % (record.key,))
+                    'Missing %s {%s}' % (kind, record.key,))
             else:
                 last_object = self._check_record(kind, record,
                     checker, last_object, current_keys[(kind,) + record.key])
@@ -1227 +1254 @@
         """Check a single text from this repository."""
         if kind == 'inventories':
             rev_id = record.key[0]
-            inv = self.deserialise_inventory(rev_id,
+            inv = self._deserialise_inventory(rev_id,
                 record.get_bytes_as('fulltext'))
             if last_object is not None:
                 delta = inv._make_delta(last_object)
@@ -1278 +1305 @@
         :param _format: The format of the repository on disk.
         :param a_bzrdir: The BzrDir of the repository.
-
-        In the future we will have a single api for all stores for
-        getting file texts, inventories and revisions, then
-        this construct will accept instances of those things.
         """
+        # In the future we will have a single api for all stores for
+        # getting file texts, inventories and revisions, then
+        # this construct will accept instances of those things.
         super(Repository, self).__init__()
         self._format = _format
         # the following are part of the public API for Repository:
@@ -1293 +1319 @@
         self._reconcile_does_inventory_gc = True
         self._reconcile_fixes_text_parents = False
         self._reconcile_backsup_inventory = True
-        # not right yet - should be more semantically clear ?
-        # TODO: make sure to construct the right store classes, etc, depending
-        # on whether escaping is required.
-        self._warn_if_deprecated()
         self._write_group = None
         # Additional places to query for data.
         self._fallback_repositories = []
         # An InventoryEntry cache, used during deserialization
         self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
+        # Is it safe to return inventory entries directly from the entry cache,
+        # rather than copying them?
+        self._safe_to_return_from_cache = False
@@ -1306 +1331 @@
+    @property
+    def user_transport(self):
+        return self.bzrdir.user_transport
+
+    @property
+    def control_transport(self):
+        return self._transport
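
The new properties split the user-visible location from the control files.
A hedged usage sketch (the URL and printed values are illustrative):

    repo = Repository.open('file:///srv/project')
    print repo.user_transport.base     # e.g. file:///srv/project/
    print repo.control_transport.base  # transport rooted at the repository's
                                       # control directory under .bzr
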
@@ -1307 +1339 @@
     def __repr__(self):
         if self._fallback_repositories:
@@ -1357 +1389 @@
         data during reads, and allows a 'write_group' to be obtained. Write
         groups must be used for actual data insertion.
 
+        A token should be passed in if you know that you have locked the object
+        some other way, and need to synchronise this object's state with that
+        fact.
+
+        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
+
         :param token: if this is already locked, then lock_write will fail
             unless the token matches the existing lock.
         :returns: a token if this instance supports tokens, otherwise None.
         :raises MismatchedToken: if the specified token doesn't match the token
             of the existing lock.
         :seealso: start_write_group.
-
-        A token should be passed in if you know that you have locked the object
-        some other way, and need to synchronise this object's state with that
-        fact.
-
-        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
+        :return: A RepositoryWriteLockResult.
         """
         locked = self.is_locked()
-        result = self.control_files.lock_write(token=token)
+        token = self.control_files.lock_write(token=token)
         if not locked:
+            self._warn_if_deprecated()
+            self._note_lock('w')
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
             self._refresh_data()
-        return result
+        return RepositoryWriteLockResult(self.unlock, token)
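
lock_write() therefore no longer returns a bare token: callers receive a
RepositoryWriteLockResult whose repository_token attribute carries the old
return value and whose unlock releases the lock. A minimal calling sketch
(assumes repo is an unlocked repository object):

    result = repo.lock_write()
    try:
        token = result.repository_token  # None if tokens are unsupported
        # ... modify the repository, typically inside a write group ...
    finally:
        result.unlock()
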
@@ -1384 +1419 @@
     def lock_read(self):
+        """Lock the repository for read operations.
+
+        :return: An object with an unlock method which will release the lock
+            obtained.
+        """
         locked = self.is_locked()
         self.control_files.lock_read()
         if not locked:
+            self._warn_if_deprecated()
+            self._note_lock('r')
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
@@ -1392 +1435 @@
     def get_physical_lock_status(self):
         return self.control_files.get_physical_lock_status()
@@ -1454 +1497 @@
         # now gather global repository information
         # XXX: This is available for many repos regardless of listability.
-        if self.bzrdir.root_transport.listable():
+        if self.user_transport.listable():
             # XXX: do we want to __define len__() ?
             # Maybe the versionedfiles object should provide a different
             # method to get the number of keys.
@@ -1469 +1512 @@
         :param using: If True, list only branches using this repository.
         """
         if using and not self.is_shared():
-            try:
-                return [self.bzrdir.open_branch()]
-            except errors.NotBranchError:
-                return []
+            return self.bzrdir.list_branches()
         class Evaluator(object):
 
             def __init__(self):
@@ -1487 +1527 @@
                     except errors.NoRepositoryPresent:
                         pass
                     else:
-                        return False, (None, repository)
+                        return False, ([], repository)
                 self.first_call = False
-                try:
-                    value = (bzrdir.open_branch(), None)
-                except errors.NotBranchError:
-                    value = (None, None)
+                value = (bzrdir.list_branches(), None)
                 return True, value
@@ -1497 +1534 @@
-        branches = []
-        for branch, repository in bzrdir.BzrDir.find_bzrdirs(
-                self.bzrdir.root_transport, evaluate=Evaluator()):
-            if branch is not None:
-                branches.append(branch)
+        ret = []
+        for branches, repository in bzrdir.BzrDir.find_bzrdirs(
+                self.user_transport, evaluate=Evaluator()):
+            if branches is not None:
+                ret.extend(branches)
             if not using and repository is not None:
-                branches.extend(repository.find_branches())
+                ret.extend(repository.find_branches())
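
The reworked Evaluator accumulates lists because bzrdir.list_branches()
returns every branch found in a control directory (possibly none) rather
than the at-most-one branch of open_branch(), so the callback now yields
values shaped ([], repository) and (branches, None). Illustrative call,
with names assumed:

    for branch in repo.find_branches(using=True):
        print branch.base
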
@@ -1507 +1544 @@
     @needs_read_lock
     def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):
@@ -1598 +1635 @@
         # but at the moment we're only checking for texts referenced by
         # inventories at the graph's edge.
         key_deps = self.revisions._index._key_dependencies
-        key_deps.add_keys(present_inventories)
+        key_deps.satisfy_refs_for_keys(present_inventories)
         referrers = frozenset(r[0] for r in key_deps.get_referrers())
         file_ids = self.fileids_altered_by_revision_ids(referrers)
         missing_texts = set()
@@ -1885 +1923 @@
             rev = self._serializer.read_revision_from_string(text)
             yield (revid, rev)
@@ -1889 +1925 @@
-    def get_revision_xml(self, revision_id):
-        # TODO: jam 20070210 This shouldn't be necessary since get_revision
-        #       would have already done it.
-        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
-        # TODO: this can't just be replaced by:
-        # return self._serializer.write_revision_to_string(
-        #     self.get_revision(revision_id))
-        # as cStringIO preserves the encoding unlike write_revision_to_string
-        # or some other call down the path.
-        rev = self.get_revision(revision_id)
-        rev_tmp = cStringIO.StringIO()
-        # the current serializer..
-        self._serializer.write_revision(rev, rev_tmp)
-        rev_tmp.seek(0)
-        return rev_tmp.getvalue()
@@ -1905 +1926 @@
     def get_deltas_for_revisions(self, revisions, specific_fileids=None):
         """Produce a generator of revision deltas.
@@ -2150 +2171 @@
         selected_keys = set((revid,) for revid in revision_ids)
         w = _inv_weave or self.inventories
-        pb = ui.ui_factory.nested_progress_bar()
-        try:
-            return self._find_file_ids_from_xml_inventory_lines(
-                w.iter_lines_added_or_present_in_keys(
-                    selected_keys, pb=pb),
+        return self._find_file_ids_from_xml_inventory_lines(
+            w.iter_lines_added_or_present_in_keys(
+                selected_keys, pb=None),
@@ -2161 +2178 @@
     def iter_files_bytes(self, desired_files):
         """Iterate through file versions.
@@ -2323 +2340 @@
         num_file_ids = len(file_ids)
         for file_id, altered_versions in file_ids.iteritems():
             if pb is not None:
-                pb.update("fetch texts", count, num_file_ids)
+                pb.update("Fetch texts", count, num_file_ids)
             count += 1
             yield ("file", file_id, altered_versions)
@@ -2372 +2389 @@
         """single-document based inventory iteration."""
         inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
         for text, revision_id in inv_xmls:
-            yield self.deserialise_inventory(revision_id, text)
+            yield self._deserialise_inventory(revision_id, text)
 
     def _iter_inventory_xmls(self, revision_ids, ordering):
         if ordering is None:
@@ -2410 +2427 @@
                 next_key = None
 
-    def deserialise_inventory(self, revision_id, xml):
+    def _deserialise_inventory(self, revision_id, xml):
         """Transform the xml into an inventory object.
 
         :param revision_id: The expected revision id of the inventory.
         :param xml: A serialised inventory.
         """
         result = self._serializer.read_inventory_from_string(xml, revision_id,
-                    entry_cache=self._inventory_entry_cache)
+                    entry_cache=self._inventory_entry_cache,
+                    return_from_cache=self._safe_to_return_from_cache)
         if result.revision_id != revision_id:
             raise AssertionError('revision id mismatch %s != %s' % (
                 result.revision_id, revision_id))
         return result
 
-    def serialise_inventory(self, inv):
-        return self._serializer.write_inventory_to_string(inv)
-
-    def _serialise_inventory_to_lines(self, inv):
-        return self._serializer.write_inventory_to_lines(inv)
-
     def get_serializer_format(self):
         return self._serializer.format_num
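
_deserialise_inventory (now private) may hand entries straight out of the
FIFO entry cache when _safe_to_return_from_cache is set; any caller that
could mutate the result must leave the flag False so cached entries get
copied. A sketch of the read-only batch pattern this enables (compare
_fetch_batch later in this diff):

    repo._safe_to_return_from_cache = True
    try:
        for inv in repo.iter_inventories(revision_ids):
            pass  # strictly read-only use; entries may alias the cache
    finally:
        repo._safe_to_return_from_cache = False
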
@@ -2435 +2447 @@
     @needs_read_lock
-    def get_inventory_xml(self, revision_id):
-        """Get inventory XML as a file object."""
+    def _get_inventory_xml(self, revision_id):
+        """Get serialized inventory as a string."""
         texts = self._iter_inventory_xmls([revision_id], 'unordered')
         try:
             text, revision_id = texts.next()
         except StopIteration:
             raise errors.HistoryMissing(self, 'inventory', revision_id)
@@ -2446 +2456 @@
-    def get_inventory_sha1(self, revision_id):
-        """Return the sha1 hash of the inventory entry
-        """
-        return self.get_revision(revision_id).inventory_sha1
@@ -2451 +2457 @@
     def get_rev_id_for_revno(self, revno, known_pair):
         """Return the revision id of a revno, given a later (revno, revid)
         pair in the same history.
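
A worked example of this contract, assuming a history whose tip is known to
be revno 120 with id tip_id: get_rev_id_for_revno(100, (120, tip_id)) walks
the left-hand ancestry back twenty steps. If the local history is deep
enough it reports success; if it runs out (e.g. stacked or shallow fallback
repositories), it returns the closest pair reached so the caller can resume
the search elsewhere:

    found, result = repo.get_rev_id_for_revno(100, (120, tip_id))
    if found:
        revid_100 = result
    else:
        closest_revno, closest_revid = result  # resume the search from here
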
@@ -2505 +2511 @@
                 next_id = parents[0]
@@ -2508 +2513 @@
-    def get_revision_inventory(self, revision_id):
-        """Return inventory of a past revision."""
-        # TODO: Unify this with get_inventory()
-        # bzr 0.0.6 and later imposes the constraint that the inventory_id
-        # must be the same as its revision, so this is trivial.
-        if revision_id is None:
-            # This does not make sense: if there is no revision,
-            # then it is the current tree inventory surely ?!
-            # and thus get_root_id() is something that looks at the last
-            # commit on the branch, and the get_root_id is an inventory check.
-            raise NotImplementedError
-            # return Inventory(self.get_root_id())
-        else:
-            return self.get_inventory(revision_id)
 
     def is_shared(self):
         """Return True if this repository is flagged as a shared repository."""
         raise NotImplementedError(self.is_shared)
@@ -2559 +2549 @@
             return RevisionTree(self, Inventory(root_id=None),
                                 _mod_revision.NULL_REVISION)
         else:
-            inv = self.get_revision_inventory(revision_id)
+            inv = self.get_inventory(revision_id)
             return RevisionTree(self, inv, revision_id)
 
     def revision_trees(self, revision_ids):
@@ -2618 +2608 @@
         keys = tsort.topo_sort(parent_map)
         return [None] + list(keys)
@@ -2621 +2611 @@
-    def pack(self, hint=None):
+    def pack(self, hint=None, clean_obsolete_packs=False):
         """Compress the data within the repository.
 
         This operation only makes sense for some repository types. For other
@@ -2634 +2624 @@
             obtained from the result of commit_write_group(). Out of
             date hints are simply ignored, because concurrent operations
             can obsolete them rapidly.
+
+        :param clean_obsolete_packs: Clean obsolete packs immediately after
+            the pack operation.
+        """
 
     def get_transaction(self):
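
Sketch of the extended pack() signature in use; the hint is whatever
commit_write_group() returned, and both arguments remain optional:

    hint = repo.commit_write_group()  # may return pack hints
    repo.pack(hint=hint, clean_obsolete_packs=True)
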
@@ -2655 +2648 @@
         for ((revision_id,), parent_keys) in \
                 self.revisions.get_parent_map(query_keys).iteritems():
             if parent_keys:
-                result[revision_id] = tuple(parent_revid
-                    for (parent_revid,) in parent_keys)
+                result[revision_id] = tuple([parent_revid
+                    for (parent_revid,) in parent_keys])
             else:
                 result[revision_id] = (_mod_revision.NULL_REVISION,)
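
The move from a generator expression to a list comprehension inside tuple()
is likely a CPython micro-optimisation: building the list runs at C speed
and lets tuple() size its result once, which adds up over the many small
parent tuples built here. A quick comparison of the two spellings (timings
are illustrative only):

    import timeit
    setup = "keys = [(str(i),) for i in range(10)]"
    print timeit.timeit("tuple([k for (k,) in keys])", setup)
    print timeit.timeit("tuple(k for (k,) in keys)", setup)
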
@@ -2664 +2657 @@
     def _make_parents_provider(self):
         return self
 
+    @needs_read_lock
+    def get_known_graph_ancestry(self, revision_ids):
+        """Return the known graph for a set of revision ids and their ancestors.
+        """
+        st = static_tuple.StaticTuple
+        revision_keys = [st(r_id).intern() for r_id in revision_ids]
+        known_graph = self.revisions.get_known_graph_ancestry(revision_keys)
+        return graph.GraphThunkIdsToKeys(known_graph)
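
The new method interns each revision id as a StaticTuple key, asks the
revisions index for a KnownGraph, and wraps it so callers keep working with
plain revision ids. Hedged usage sketch (the revision id variables are
assumed):

    repo.lock_read()
    try:
        kg = repo.get_known_graph_ancestry([tip_revision_id])
        heads = kg.heads([revid_a, revid_b])  # takes revision ids, not keys
    finally:
        repo.unlock()
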
@@ -2667 +2669 @@
     def get_graph(self, other_repository=None):
         """Return the graph walker for this repository format"""
         parents_provider = self._make_parents_provider()
@@ -2764 +2766 @@
         result.check(callback_refs)
         return result
 
-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         global _deprecation_warning_done
         if _deprecation_warning_done:
             return
-        _deprecation_warning_done = True
-        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
-                % (self._format, self.bzrdir.transport.base))
+        try:
+            if branch is None:
+                conf = config.GlobalConfig()
+            else:
+                conf = branch.get_config()
+            if conf.suppress_warning('format_deprecation'):
+                return
+            warning("Format %s for %s is deprecated -"
+                " please use 'bzr upgrade' to get better performance"
+                % (self._format, self.bzrdir.transport.base))
+        finally:
+            _deprecation_warning_done = True
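
The rewritten warning consults configuration first (the branch config when a
branch is at hand, the global config otherwise), and the try/finally still
guarantees at most one warning per process. Users can silence it with the
existing suppress_warnings option, e.g. in bazaar.conf:

    [DEFAULT]
    suppress_warnings = format_deprecation
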
@@ -2775 +2786 @@
     def supports_rich_root(self):
         return self._format.rich_root_data
@@ -3059 +3070 @@
     pack_compresses = False
     # Does the repository inventory storage understand references to trees?
     supports_tree_reference = None
+    # Is the format experimental ?
+    experimental = False
@@ -3064 +3077 @@
-        return "<%s>" % self.__class__.__name__
+        return "%s()" % self.__class__.__name__
 
     def __eq__(self, other):
         # format objects are generally stateless
@@ -3082 +3095 @@
             transport = a_bzrdir.get_repository_transport(None)
-            format_string = transport.get("format").read()
+            format_string = transport.get_bytes("format")
             return format_registry.get(format_string)
         except errors.NoSuchFile:
             raise errors.NoRepositoryPresent(a_bzrdir)
@@ -3186 +3199 @@
         raise NotImplementedError(self.open)
 
+    def _run_post_repo_init_hooks(self, repository, a_bzrdir, shared):
+        from bzrlib.bzrdir import BzrDir, RepoInitHookParams
+        hooks = BzrDir.hooks['post_repo_init']
+        if not hooks:
+            return
+        params = RepoInitHookParams(repository, self, a_bzrdir, shared)
+        for hook in hooks:
+            hook(params)
 
 class MetaDirRepositoryFormat(RepositoryFormat):
     """Common base class for the new repositories using the metadir layout."""
@@ -3396 +3418 @@
         :param revision_id: if None all content is copied, if NULL_REVISION no
             content is copied.
-        :param pb: optional progress bar to use for progress reports. If not
-            provided a default one will be created.
         """
+        ui.ui_factory.warn_experimental_format_fetch(self)
         from bzrlib.fetch import RepoFetcher
+        # See <https://launchpad.net/bugs/456077> asking for a warning here
+        if self.source._format.network_name() != self.target._format.network_name():
+            ui.ui_factory.show_user_warning('cross_format_fetch',
+                from_format=self.source._format,
+                to_format=self.target._format)
         f = RepoFetcher(to_repository=self.target,
             from_repository=self.source,
             last_revision=revision_id,
             fetch_spec=fetch_spec,
-            pb=pb, find_ghosts=find_ghosts)
+            find_ghosts=find_ghosts)
 
     def _walk_to_common_revisions(self, revision_ids):
         """Walk out from revision_ids in source to revisions target has.
@@ -3579 +3606 @@
         self.target.texts.insert_record_stream(
             self.source.texts.get_record_stream(
                 self.source.texts.keys(), 'topological', False))
-        pb.update('copying inventory', 0, 1)
+        pb.update('Copying inventory', 0, 1)
         self.target.inventories.insert_record_stream(
             self.source.inventories.get_record_stream(
                 self.source.inventories.keys(), 'topological', False))
@@ -3806 +3833 @@
                 basis_id, delta, current_revision_id, parents_parents)
             cache[current_revision_id] = parent_tree
 
-    def _fetch_batch(self, revision_ids, basis_id, cache):
+    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
         """Fetch across a few revisions.
 
         :param revision_ids: The revisions to copy
         :param basis_id: The revision_id of a tree that must be in cache, used
             as a basis for delta when no other base is available
         :param cache: A cache of RevisionTrees that we can use.
+        :param a_graph: A Graph object to determine the heads() of the
+            rich-root data stream.
         :return: The revision_id of the last converted tree. The RevisionTree
             for it will be in cache
         """
@@ -3825 +3854 @@
         pending_revisions = []
         parent_map = self.source.get_parent_map(revision_ids)
         self._fetch_parent_invs_for_stacking(parent_map, cache)
+        self.source._safe_to_return_from_cache = True
         for tree in self.source.revision_trees(revision_ids):
             # Find an inventory delta for this revision.
             # Find text entries that need to be copied, too.
@@ -3838 +3868 @@
             possible_trees.append((basis_id, cache[basis_id]))
             basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                            possible_trees)
+            revision = self.source.get_revision(current_revision_id)
+            pending_deltas.append((basis_id, delta,
+                current_revision_id, revision.parent_ids))
             if self._converting_to_rich_root:
                 self._revision_id_to_root_id[current_revision_id] = \
                     tree.get_root_id()
@@ -3872 +3905 @@
                     if entry.revision == file_revision:
                         texts_possibly_new_in_tree.remove(file_key)
             text_keys.update(texts_possibly_new_in_tree)
-            revision = self.source.get_revision(current_revision_id)
-            pending_deltas.append((basis_id, delta,
-                current_revision_id, revision.parent_ids))
             pending_revisions.append(revision)
             cache[current_revision_id] = tree
             basis_id = current_revision_id
+        self.source._safe_to_return_from_cache = False
         # Copy file texts
         from_texts = self.source.texts
         to_texts = self.target.texts
         if root_keys_to_create:
-            from bzrlib.fetch import _new_root_data_stream
-            root_stream = _new_root_data_stream(
+            root_stream = _mod_fetch._new_root_data_stream(
                 root_keys_to_create, self._revision_id_to_root_id, parent_map,
-                self.source)
+                self.source, graph=a_graph)
             to_texts.insert_record_stream(root_stream)
         to_texts.insert_record_stream(from_texts.get_record_stream(
             text_keys, self.target._format._fetch_order,
@@ -3948 +3978 @@
             cache[basis_id] = basis_tree
             del basis_tree # We don't want to hang on to it here
+        if self._converting_to_rich_root and len(revision_ids) > 100:
+            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
+                                                            revision_ids)
+        else:
+            a_graph = None
@@ -3951 +3987 @@
         for offset in range(0, len(revision_ids), batch_size):
             self.target.start_write_group()
             try:
                 pb.update('Transferring revisions', offset,
                           len(revision_ids))
                 batch = revision_ids[offset:offset+batch_size]
-                basis_id = self._fetch_batch(batch, basis_id, cache)
+                basis_id = self._fetch_batch(batch, basis_id, cache,
+                                             a_graph=a_graph)
             except:
+                self.source._safe_to_return_from_cache = False
                 self.target.abort_write_group()
@@ -3973 +4011 @@
         """See InterRepository.fetch()."""
         if fetch_spec is not None:
             raise AssertionError("Not implemented yet...")
+        ui.ui_factory.warn_experimental_format_fetch(self)
         if (not self.source.supports_rich_root()
             and self.target.supports_rich_root()):
             self._converting_to_rich_root = True
             self._revision_id_to_root_id = {}
         else:
             self._converting_to_rich_root = False
+        # See <https://launchpad.net/bugs/456077> asking for a warning here
+        if self.source._format.network_name() != self.target._format.network_name():
+            ui.ui_factory.show_user_warning('cross_format_fetch',
+                from_format=self.source._format,
+                to_format=self.target._format)
         revision_ids = self.target.search_missing_revision_ids(self.source,
             revision_id, find_ghosts=find_ghosts).get_keys()
         if not revision_ids:
@@ -4053 +4097 @@
         :param to_convert: The disk object to convert.
         :param pb: a progress bar to use for progress information.
         """
+        pb = ui.ui_factory.nested_progress_bar()
@@ -4059 +4103 @@
         # this is only useful with metadir layouts - separated repo content.
         # trigger an assertion if not such
         repo._format.get_format_string()
         self.repo_dir = repo.bzrdir
-        self.step('Moving repository to repository.backup')
+        pb.update('Moving repository to repository.backup')
         self.repo_dir.transport.move('repository', 'repository.backup')
         backup_transport = self.repo_dir.transport.clone('repository.backup')
         repo._format.check_conversion_target(self.target_format)
         self.source_repo = repo._format.open(self.repo_dir,
             _found=True,
             _override_transport=backup_transport)
-        self.step('Creating new repository')
+        pb.update('Creating new repository')
         converted = self.target_format.initialize(self.repo_dir,
                                                   self.source_repo.is_shared())
         converted.lock_write()
         try:
-            self.step('Copying content into repository.')
+            pb.update('Copying content')
             self.source_repo.copy_content_into(converted)
         finally:
             converted.unlock()
-        self.step('Deleting old repository content.')
+        pb.update('Deleting old repository content')
         self.repo_dir.transport.delete_tree('repository.backup')
-        self.pb.note('repository converted')
+        ui.ui_factory.note('repository converted')
@@ -4083 +4127 @@
-    def step(self, message):
-        """Update the pb by a step."""
-        self.count +=1
-        self.pb.update(message, self.count, self.total)
@@ -4089 +4129 @@
 _unescape_map = {
@@ -4310 +4350 @@
             if versioned_file is None:
@@ -4312 +4352 @@
+            # TODO: key is often going to be a StaticTuple object
+            #       I don't believe we can define a method by which
+            #       (prefix,) + StaticTuple will work, though we could
+            #       define a StaticTuple.sq_concat that would allow you to
+            #       pass in either a tuple or a StaticTuple as the second
+            #       object, so instead we could have:
+            #       StaticTuple(prefix) + key here...
             missing_keys.update((prefix,) + key for key in
                 versioned_file.get_missing_compression_parent_keys())
         except NotImplementedError:
@@ -4426 +4473 @@
         fetching the inventory weave.
         """
         if self._rich_root_upgrade():
-            return bzrlib.fetch.Inter1and2Helper(
+            return _mod_fetch.Inter1and2Helper(
                 self.from_repository).generate_root_texts(revs)
@@ -4576 +4622 @@
     def _get_convertable_inventory_stream(self, revision_ids,
                                           delta_versus_null=False):
-        # The source is using CHKs, but the target either doesn't or it has a
-        # different serializer. The StreamSink code expects to be able to
+        # The two formats are sufficiently different that there is no fast
+        # path, so we need to send just inventorydeltas, which any
+        # sufficiently modern client can insert into any repository.
+        # The StreamSink code expects to be able to
         # convert on the target, so we need to put bytes-on-the-wire that can
         # be converted. That means inventory deltas (if the remote is <1.19,
         # RemoteStreamSink will fallback to VFS to insert the deltas).