@@ -64 +67 @@
-from bzrlib.lock import _RelockDebugMixin
 from bzrlib import registry
+from bzrlib.recordcounter import RecordCounter
+from bzrlib.lock import _RelockDebugMixin, LogicalLockResult
 from bzrlib.trace import (
     log_exception_quietly, note, mutter, mutter_callsite, warning)
@@ -71 +74 @@
 _deprecation_warning_done = False


+class IsInWriteGroupError(errors.InternalBzrError):
+
+    _fmt = "May not refresh_data of repo %(repo)s while in a write group."
+
+    def __init__(self, repo):
+        errors.InternalBzrError.__init__(self, repo=repo)
+
+
 class CommitBuilder(object):
     """Provides an interface to build up a commit.
@@ -860 +871 @@
     # versioned roots do not change unless the tree found a change.


+class RepositoryWriteLockResult(LogicalLockResult):
+    """The result of write locking a repository.
+
+    :ivar repository_token: The token obtained from the underlying lock, or
+        None.
+    :ivar unlock: A callable which will unlock the lock.
+    """
+
+    def __init__(self, unlock, repository_token):
+        LogicalLockResult.__init__(self, unlock)
+        self.repository_token = repository_token
+
+    def __repr__(self):
+        return "RepositoryWriteLockResult(%s, %s)" % (self.repository_token,
+            self.unlock)
+
+
 ######################################################################
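RepositoryWriteLockResult pairs the unlock callable (inherited from
LogicalLockResult) with the lock token, so a single return value serves
both purposes. An illustrative use, assuming repo is a write-lockable
Repository instance:

    # Illustrative only: 'repo' stands in for a Repository instance.
    result = repo.lock_write()
    try:
        if result.repository_token is not None:
            # The raw token can be handed to cooperating code that
            # needs to take the same physical lock.
            token = result.repository_token
    finally:
        result.unlock()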
@@ -1376 +1404 @@
         data during reads, and allows a 'write_group' to be obtained. Write
         groups must be used for actual data insertion.

+        A token should be passed in if you know that you have locked the object
+        some other way, and need to synchronise this object's state with that
+        fact.
+
+        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
+
         :param token: if this is already locked, then lock_write will fail
             unless the token matches the existing lock.
         :returns: a token if this instance supports tokens, otherwise None.
         :raises MismatchedToken: if the specified token doesn't match the token
             of the existing lock.
         :seealso: start_write_group.
-
-        A token should be passed in if you know that you have locked the object
-        some other way, and need to synchronise this object's state with that
-        fact.
-
-        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
+        :return: A RepositoryWriteLockResult.
         """
         locked = self.is_locked()
-        result = self.control_files.lock_write(token=token)
+        token = self.control_files.lock_write(token=token)
         if not locked:
             self._warn_if_deprecated()
             self._note_lock('w')
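Since lock_write() now returns a RepositoryWriteLockResult rather than a
bare token, callers that previously kept the return value as a token
would read it from the result object instead. A hedged before/after
sketch of that calling convention:

    # Before: lock_write() returned a token (or None).
    # token = repo.lock_write()
    # After (assumed convention for this change):
    token = repo.lock_write().repository_token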
@@ -1634 +1669 @@
         return missing_keys

     def refresh_data(self):
-        """Re-read any data needed to to synchronise with disk.
+        """Re-read any data needed to synchronise with disk.

         This method is intended to be called after another repository instance
         (such as one used by a smart server) has inserted data into the
-        repository. It may not be called during a write group, but may be
-        called at any other time.
+        repository. On all repositories this will work outside of write groups.
+        Some repository formats (pack and newer for bzrlib native formats)
+        support refresh_data inside write groups. If called inside a write
+        group on a repository that does not support refreshing in a write group
+        IsInWriteGroupError will be raised.
         """
-        if self.is_in_write_group():
-            raise errors.InternalBzrError(
-                "May not refresh_data while in a write group.")
         self._refresh_data()

     def resume_write_group(self, tokens):
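Under the new contract, code that might run inside a write group should
be prepared for IsInWriteGroupError on formats that cannot refresh
there. A defensive sketch, relying only on the class added earlier in
this diff:

    from bzrlib.repository import IsInWriteGroupError

    try:
        repo.refresh_data()
    except IsInWriteGroupError:
        # This format cannot refresh inside a write group; commit or
        # abort the write group first, then refresh.
        pass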
@@ -4280 +4316 @@
                 new_pack.set_write_cache_size(1024*1024)
+        rc = RecordCounter()
+        pb = ui.ui_factory.nested_progress_bar()
         for substream_type, substream in stream:
             if 'stream' in debug.debug_flags:
                 mutter('inserting substream: %s', substream_type)
             if substream_type == 'texts':
-                self.target_repo.texts.insert_record_stream(substream)
+                rc.stream_type = substream_type
+                self.target_repo.texts.insert_record_stream(substream,
+                    substream_type, rc)
             elif substream_type == 'inventories':
                 if src_serializer == to_serializer:
+                    rc.stream_type = substream_type
                     self.target_repo.inventories.insert_record_stream(
-                        substream)
+                        substream, substream_type, rc)
                 else:
                     self._extract_and_insert_inventories(
                         substream, src_serializer)
             elif substream_type == 'inventory-deltas':
                 self._extract_and_insert_inventory_deltas(
                     substream, src_serializer)
             elif substream_type == 'chk_bytes':
                 # XXX: This doesn't support conversions, as it assumes the
                 # conversion was done in the fetch code.
-                self.target_repo.chk_bytes.insert_record_stream(substream)
+                rc.stream_type = substream_type
+                self.target_repo.chk_bytes.insert_record_stream(substream,
+                    substream_type, rc)
             elif substream_type == 'revisions':
                 # This may fallback to extract-and-insert more often than
                 # required if the serializers are different only in terms of
                 # the inventory.
                 if src_serializer == to_serializer:
+                    # To avoid eager counting of the number of revisions to
+                    # fetch we pass RecordCounter to revisions.insert_record_stream,
+                    # which initializes RecordCounter to be used with the other
+                    # insert_record_stream operations to provide a better estimate.
+                    rc.stream_type = substream_type
                     self.target_repo.revisions.insert_record_stream(
-                        substream)
+                        substream, substream_type, rc)
                 else:
                     self._extract_and_insert_revisions(substream,
                         src_serializer)
             elif substream_type == 'signatures':
-                self.target_repo.signatures.insert_record_stream(substream)
+                rc.stream_type = substream_type
+                current_count = self.target_repo.signatures.insert_record_stream(substream,
+                    substream_type, rc)
             else:
                 raise AssertionError('kaboom! %s' % (substream_type,))
+        # Indicate the record copy is complete.
+        # We do this as max is only an estimate
+        pb.update('', rc.max, rc.max)
+        pb.finished()
         # Done inserting data, and the missing_keys calculations will try to
         # read back from the inserted data, so flush the writes to the new pack
         # (if this is pack format).
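The single RecordCounter instance is shared across all substreams so the
progress bar can display one consolidated, growing estimate instead of
eagerly counting every record up front. The real class lives in
bzrlib.recordcounter; the stand-in below only illustrates the
estimate-as-you-go idea and is not its actual API:

    class _CounterSketch(object):
        """Stand-in showing the growing-estimate idea."""

        def __init__(self):
            self.stream_type = None
            self.current = 0
            self.max = 0  # running estimate, not an exact total

        def increment(self, count):
            self.current += count
            if self.current > self.max:
                # Grow the estimate instead of pre-counting the stream.
                self.max = self.current * 2

    # insert_record_stream implementations would bump the counter per
    # batch; the caller above finishes with pb.update('', rc.max, rc.max)
    # because max is only ever an estimate.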