/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

« back to all changes in this revision

Viewing changes to bzrlib/repository.py

  • Committer: Parth Malwankar
  • Date: 2010-05-13 06:54:21 UTC
  • mfrom: (5225 +trunk)
  • mto: This revision was merged to the branch mainline in revision 5241.
  • Revision ID: parth.malwankar@gmail.com-20100513065421-bde255i4ga5o3oqe
merged in changes from trunk.

Show diffs side-by-side

added added

removed removed

Lines of Context:
26
26
    chk_map,
27
27
    config,
28
28
    debug,
29
 
    errors,
30
29
    fetch as _mod_fetch,
31
30
    fifo_cache,
32
31
    generate_ids,
44
43
    symbol_versioning,
45
44
    trace,
46
45
    tsort,
47
 
    ui,
48
46
    versionedfile,
49
47
    )
50
48
from bzrlib.bundle import serializer
53
51
from bzrlib.testament import Testament
54
52
""")
55
53
 
 
54
from bzrlib import (
 
55
    errors,
 
56
    registry,
 
57
    ui,
 
58
    )
56
59
from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
57
60
from bzrlib.inter import InterObject
58
61
from bzrlib.inventory import (
61
64
    ROOT_ID,
62
65
    entry_factory,
63
66
    )
64
 
from bzrlib.lock import _RelockDebugMixin
65
 
from bzrlib import registry
 
67
from bzrlib.recordcounter import RecordCounter
 
68
from bzrlib.lock import _RelockDebugMixin, LogicalLockResult
66
69
from bzrlib.trace import (
67
70
    log_exception_quietly, note, mutter, mutter_callsite, warning)
68
71
 
71
74
_deprecation_warning_done = False
72
75
 
73
76
 
 
77
class IsInWriteGroupError(errors.InternalBzrError):
    """Raised when refresh_data is attempted inside an active write group."""

    _fmt = "May not refresh_data of repo %(repo)s while in a write group."

    def __init__(self, repo):
        # Hand the repo to the base error so %(repo)s resolves in _fmt.
        errors.InternalBzrError.__init__(self, repo=repo)
 
83
 
 
84
 
74
85
class CommitBuilder(object):
75
86
    """Provides an interface to build up a commit.
76
87
 
860
871
        # versioned roots do not change unless the tree found a change.
861
872
 
862
873
 
 
874
class RepositoryWriteLockResult(LogicalLockResult):
    """The result of write locking a repository.

    :ivar repository_token: The token obtained from the underlying lock, or
        None.
    :ivar unlock: A callable which will unlock the lock.
    """

    def __init__(self, unlock, repository_token):
        # Let the base class record the unlock callable, then keep the token.
        LogicalLockResult.__init__(self, unlock)
        self.repository_token = repository_token

    def __repr__(self):
        return "RepositoryWriteLockResult(%s, %s)" % (
            self.repository_token, self.unlock)
 
889
 
 
890
 
863
891
######################################################################
864
892
# Repositories
865
893
 
1376
1404
        data during reads, and allows a 'write_group' to be obtained. Write
1377
1405
        groups must be used for actual data insertion.
1378
1406
 
 
1407
        A token should be passed in if you know that you have locked the object
 
1408
        some other way, and need to synchronise this object's state with that
 
1409
        fact.
 
1410
 
 
1411
        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
 
1412
 
1379
1413
        :param token: if this is already locked, then lock_write will fail
1380
1414
            unless the token matches the existing lock.
1381
1415
        :returns: a token if this instance supports tokens, otherwise None.
1384
1418
        :raises MismatchedToken: if the specified token doesn't match the token
1385
1419
            of the existing lock.
1386
1420
        :seealso: start_write_group.
1387
 
 
1388
 
        A token should be passed in if you know that you have locked the object
1389
 
        some other way, and need to synchronise this object's state with that
1390
 
        fact.
1391
 
 
1392
 
        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
 
1421
        :return: A RepositoryWriteLockResult.
1393
1422
        """
1394
1423
        locked = self.is_locked()
1395
 
        result = self.control_files.lock_write(token=token)
 
1424
        token = self.control_files.lock_write(token=token)
1396
1425
        if not locked:
1397
1426
            self._warn_if_deprecated()
1398
1427
            self._note_lock('w')
1400
1429
                # Writes don't affect fallback repos
1401
1430
                repo.lock_read()
1402
1431
            self._refresh_data()
1403
 
        return result
 
1432
        return RepositoryWriteLockResult(self.unlock, token)
1404
1433
 
1405
1434
    def lock_read(self):
 
1435
        """Lock the repository for read operations.
 
1436
 
 
1437
        :return: An object with an unlock method which will release the lock
 
1438
            obtained.
 
1439
        """
1406
1440
        locked = self.is_locked()
1407
1441
        self.control_files.lock_read()
1408
1442
        if not locked:
1411
1445
            for repo in self._fallback_repositories:
1412
1446
                repo.lock_read()
1413
1447
            self._refresh_data()
 
1448
        return LogicalLockResult(self.unlock)
1414
1449
 
1415
1450
    def get_physical_lock_status(self):
1416
1451
        return self.control_files.get_physical_lock_status()
1634
1669
        return missing_keys
1635
1670
 
1636
1671
    def refresh_data(self):
1637
 
        """Re-read any data needed to to synchronise with disk.
 
1672
        """Re-read any data needed to synchronise with disk.
1638
1673
 
1639
1674
        This method is intended to be called after another repository instance
1640
1675
        (such as one used by a smart server) has inserted data into the
1641
 
        repository. It may not be called during a write group, but may be
1642
 
        called at any other time.
 
1676
        repository. On all repositories this will work outside of write groups.
 
1677
        Some repository formats (pack and newer for bzrlib native formats)
 
1678
        support refresh_data inside write groups. If called inside a write
 
1679
        group on a repository that does not support refreshing in a write group
 
1680
        IsInWriteGroupError will be raised.
1643
1681
        """
1644
 
        if self.is_in_write_group():
1645
 
            raise errors.InternalBzrError(
1646
 
                "May not refresh_data while in a write group.")
1647
1682
        self._refresh_data()
1648
1683
 
1649
1684
    def resume_write_group(self, tokens):
4249
4284
                is_resume = False
4250
4285
            try:
4251
4286
                # locked_insert_stream performs a commit|suspend.
4252
 
                return self._locked_insert_stream(stream, src_format, is_resume)
 
4287
                return self._locked_insert_stream(stream, src_format,
 
4288
                    is_resume)
4253
4289
            except:
4254
4290
                self.target_repo.abort_write_group(suppress_errors=True)
4255
4291
                raise
4278
4314
                pass
4279
4315
            else:
4280
4316
                new_pack.set_write_cache_size(1024*1024)
 
4317
        current_count = 0
 
4318
        rc = RecordCounter()
 
4319
        pb = ui.ui_factory.nested_progress_bar()
 
4320
 
4281
4321
        for substream_type, substream in stream:
4282
4322
            if 'stream' in debug.debug_flags:
4283
4323
                mutter('inserting substream: %s', substream_type)
4284
4324
            if substream_type == 'texts':
4285
 
                self.target_repo.texts.insert_record_stream(substream)
 
4325
                #print "A"
 
4326
                rc.stream_type = substream_type
 
4327
                self.target_repo.texts.insert_record_stream(substream,
 
4328
                    rc)
4286
4329
            elif substream_type == 'inventories':
 
4330
                #print "B"
4287
4331
                if src_serializer == to_serializer:
 
4332
                    rc.stream_type = substream_type
4288
4333
                    self.target_repo.inventories.insert_record_stream(
4289
 
                        substream)
 
4334
                        substream, substream_type, rc)
4290
4335
                else:
4291
4336
                    self._extract_and_insert_inventories(
4292
4337
                        substream, src_serializer)
4293
4338
            elif substream_type == 'inventory-deltas':
 
4339
                #print "C"
4294
4340
                self._extract_and_insert_inventory_deltas(
4295
4341
                    substream, src_serializer)
4296
4342
            elif substream_type == 'chk_bytes':
4297
4343
                # XXX: This doesn't support conversions, as it assumes the
4298
4344
                #      conversion was done in the fetch code.
4299
 
                self.target_repo.chk_bytes.insert_record_stream(substream)
 
4345
                #print "D"
 
4346
                rc.stream_type = substream_type
 
4347
                self.target_repo.chk_bytes.insert_record_stream(substream,
 
4348
                    substream_type, rc)
4300
4349
            elif substream_type == 'revisions':
4301
4350
                # This may fallback to extract-and-insert more often than
4302
4351
                # required if the serializers are different only in terms of
4303
4352
                # the inventory.
4304
4353
                if src_serializer == to_serializer:
 
4354
                    # To avoid eager counting of the number of revisions to
 
4355
                    # fetch we pass RecordCounter to revisions.insert_record_stream
 
4356
                    # which initialized RecordCounter to be used with the other
 
4357
                    # insert_record_stream operation to provide a better estimate
 
4358
                    # of workload.
 
4359
                    #print "E"
 
4360
                    rc.stream_type = substream_type
4305
4361
                    self.target_repo.revisions.insert_record_stream(
4306
 
                        substream)
 
4362
                        substream, substream_type, rc)
4307
4363
                else:
4308
4364
                    self._extract_and_insert_revisions(substream,
4309
4365
                        src_serializer)
4310
4366
            elif substream_type == 'signatures':
4311
 
                self.target_repo.signatures.insert_record_stream(substream)
 
4367
                #print "F"
 
4368
                rc.stream_type = substream_type
 
4369
                current_count = self.target_repo.signatures.insert_record_stream(substream,
 
4370
                    substream_type, rc)
4312
4371
            else:
4313
4372
                raise AssertionError('kaboom! %s' % (substream_type,))
 
4373
 
 
4374
        # Indicate the record copy is complete.
 
4375
        # We do this as max is only an estimate
 
4376
        pb.update('', rc.max, rc.max)
 
4377
        pb.finished()
 
4378
 
4314
4379
        # Done inserting data, and the missing_keys calculations will try to
4315
4380
        # read back from the inserted data, so flush the writes to the new pack
4316
4381
        # (if this is pack format).