/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to bzrlib/repository.py

  • Committer: Canonical.com Patch Queue Manager
  • Date: 2010-01-14 00:01:32 UTC
  • mfrom: (4957.1.1 jam-integration)
  • Revision ID: pqm@pqm.ubuntu.com-20100114000132-3p3rabnonjw3gzqb
(jam) Merge bzr.stable, bringing in bug fixes #175839, #504390

@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
+# Copyright (C) 2005-2010 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -24,8 +24,10 @@
     bzrdir,
     check,
     chk_map,
+    config,
     debug,
     errors,
+    fetch as _mod_fetch,
     fifo_cache,
     generate_ids,
     gpg,
@@ -39,6 +41,7 @@
     osutils,
     revision as _mod_revision,
     symbol_versioning,
+    trace,
     tsort,
     ui,
     versionedfile,
@@ -49,7 +52,7 @@
 from bzrlib.testament import Testament
 """)
 
-from bzrlib.decorators import needs_read_lock, needs_write_lock
+from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
 from bzrlib.inter import InterObject
 from bzrlib.inventory import (
     Inventory,
@@ -57,6 +60,7 @@
     ROOT_ID,
     entry_factory,
     )
+from bzrlib.lock import _RelockDebugMixin
 from bzrlib import registry
 from bzrlib.trace import (
     log_exception_quietly, note, mutter, mutter_callsite, warning)
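
The closing '""")' in the hunk above marks these imports as living inside bzrlib's lazy_import block: repository.py declares most of its imports in a triple-quoted string, and lazy_import installs placeholders that only perform the real import on first attribute access, keeping 'bzr' startup fast. A minimal sketch of the idiom, trimmed to just the names this diff adds (the file's real block lists many more modules):

    from bzrlib.lazy_import import lazy_import
    lazy_import(globals(), """
    from bzrlib import (
        config,
        fetch as _mod_fetch,
        trace,
        )
    """)

This is also why later hunks replace function-level 'from bzrlib.fetch import ...' statements with '_mod_fetch.' attribute lookups: the lazily imported module can be referenced at any point without paying its import cost up front.
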
@@ -205,7 +209,10 @@
             # an inventory delta was accumulated without creating a new
             # inventory.
             basis_id = self.basis_delta_revision
-            self.inv_sha1 = self.repository.add_inventory_by_delta(
+            # We ignore the 'inventory' returned by add_inventory_by_delta
+            # because self.new_inventory is used to hint to the rest of the
+            # system what code path was taken
+            self.inv_sha1, _ = self.repository.add_inventory_by_delta(
                 basis_id, self._basis_delta, self._new_revision_id,
                 self.parents)
         else:
@@ -856,7 +863,7 @@
 # Repositories
 
 
-class Repository(object):
+class Repository(_RelockDebugMixin):
     """Repository holding history for one or more branches.
 
     The repository holds and retrieves historical information including
@@ -1299,16 +1306,14 @@
         self._reconcile_does_inventory_gc = True
         self._reconcile_fixes_text_parents = False
         self._reconcile_backsup_inventory = True
-        # not right yet - should be more semantically clear ?
-        #
-        # TODO: make sure to construct the right store classes, etc, depending
-        # on whether escaping is required.
-        self._warn_if_deprecated()
         self._write_group = None
         # Additional places to query for data.
         self._fallback_repositories = []
         # An InventoryEntry cache, used during deserialization
         self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
+        # Is it safe to return inventory entries directly from the entry cache,
+        # rather copying them?
+        self._safe_to_return_from_cache = False
 
     def __repr__(self):
         if self._fallback_repositories:
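
The new _safe_to_return_from_cache flag trades a copy for safety: handing out cached InventoryEntry objects directly is cheaper than copying them, but only sound while no caller can mutate the shared objects (later hunks enable it only around the tightly controlled fetch loop). A rough sketch of the pattern with hypothetical helper names, not the serializer's actual internals:

    def lookup_entry(entry_cache, key, build_entry, return_from_cache=False):
        # entry_cache is a bounded dict-like cache, e.g. fifo_cache.FIFOCache
        try:
            entry = entry_cache[key]
        except KeyError:
            entry_cache[key] = entry = build_entry(key)
            return entry
        if return_from_cache:
            # Cheap, but callers must promise not to mutate the shared entry.
            return entry
        return entry.copy()
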
@@ -1381,6 +1386,8 @@
         locked = self.is_locked()
         result = self.control_files.lock_write(token=token)
         if not locked:
+            self._warn_if_deprecated()
+            self._note_lock('w')
             for repo in self._fallback_repositories:
                 # Writes don't affect fallback repos
                 repo.lock_read()
@@ -1391,6 +1398,8 @@
         locked = self.is_locked()
         self.control_files.lock_read()
         if not locked:
+            self._warn_if_deprecated()
+            self._note_lock('r')
             for repo in self._fallback_repositories:
                 repo.lock_read()
             self._refresh_data()
@@ -1720,6 +1729,7 @@
         self.start_write_group()
         return result
 
+    @only_raises(errors.LockNotHeld, errors.LockBroken)
     def unlock(self):
         if (self.control_files._lock_count == 1 and
             self.control_files._lock_mode == 'w'):
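
only_raises, imported in an earlier hunk, guards cleanup methods like unlock(): if unlock() blows up while another exception is already propagating, the new error would mask the original one. The decorator lets the named lock errors through and quietly drops anything else. A minimal sketch of the idea, assuming the logging behaviour of bzrlib/decorators.py rather than quoting it:

    from bzrlib.trace import log_exception_quietly

    def only_raises(*allowed):
        # Only exceptions in 'allowed' propagate; anything else is logged
        # and swallowed so it cannot mask an in-flight exception.
        def decorator(unbound):
            def wrapped(*args, **kwargs):
                try:
                    return unbound(*args, **kwargs)
                except allowed:
                    raise
                except Exception:
                    log_exception_quietly()
            return wrapped
        return decorator
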
@@ -2329,7 +2339,7 @@
         num_file_ids = len(file_ids)
         for file_id, altered_versions in file_ids.iteritems():
             if pb is not None:
-                pb.update("fetch texts", count, num_file_ids)
+                pb.update("Fetch texts", count, num_file_ids)
             count += 1
             yield ("file", file_id, altered_versions)
 
@@ -2423,7 +2433,8 @@
         :param xml: A serialised inventory.
         """
         result = self._serializer.read_inventory_from_string(xml, revision_id,
-                    entry_cache=self._inventory_entry_cache)
+                    entry_cache=self._inventory_entry_cache,
+                    return_from_cache=self._safe_to_return_from_cache)
         if result.revision_id != revision_id:
             raise AssertionError('revision id mismatch %s != %s' % (
                 result.revision_id, revision_id))
@@ -2661,8 +2672,8 @@
         for ((revision_id,), parent_keys) in \
                 self.revisions.get_parent_map(query_keys).iteritems():
             if parent_keys:
-                result[revision_id] = tuple(parent_revid
-                    for (parent_revid,) in parent_keys)
+                result[revision_id] = tuple([parent_revid
+                    for (parent_revid,) in parent_keys])
             else:
                 result[revision_id] = (_mod_revision.NULL_REVISION,)
         return result
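
Swapping the generator expression for a list comprehension inside tuple() is a deliberate CPython micro-optimization, not a style change: tuple() can size its result directly from a list, while draining a generator goes through the iterator protocol one item at a time. It matters here because get_parent_map is hot during fetch. A quick, illustrative way to compare the two (absolute numbers vary by interpreter and machine):

    import timeit
    keys = [('rev-%d' % i,) for i in range(100)]
    gen = lambda: tuple(r for (r,) in keys)
    lst = lambda: tuple([r for (r,) in keys])
    print timeit.timeit(gen, number=10000)
    print timeit.timeit(lst, number=10000)  # typically the faster of the two
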
@@ -2770,13 +2781,22 @@
         result.check(callback_refs)
         return result
 
-    def _warn_if_deprecated(self):
+    def _warn_if_deprecated(self, branch=None):
         global _deprecation_warning_done
         if _deprecation_warning_done:
             return
-        _deprecation_warning_done = True
-        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
-                % (self._format, self.bzrdir.transport.base))
+        try:
+            if branch is None:
+                conf = config.GlobalConfig()
+            else:
+                conf = branch.get_config()
+            if conf.suppress_warning('format_deprecation'):
+                return
+            warning("Format %s for %s is deprecated -"
+                    " please use 'bzr upgrade' to get better performance"
+                    % (self._format, self.bzrdir.transport.base))
+        finally:
+            _deprecation_warning_done = True
 
     def supports_rich_root(self):
         return self._format.rich_root_data
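
The rewritten _warn_if_deprecated consults the newly imported config module so users can opt out per-branch or globally. Assuming the standard suppress_warnings option that suppress_warning('format_deprecation') reads, an entry like this in bazaar.conf would silence the nag:

    [DEFAULT]
    suppress_warnings = format_deprecation

The try/finally also tightens the one-shot behaviour: _deprecation_warning_done is now set exactly once, whether the warning was emitted or suppressed.
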
@@ -3086,7 +3106,7 @@
         """
         try:
             transport = a_bzrdir.get_repository_transport(None)
-            format_string = transport.get("format").read()
+            format_string = transport.get_bytes("format")
             return format_registry.get(format_string)
         except errors.NoSuchFile:
             raise errors.NoRepositoryPresent(a_bzrdir)
@@ -3405,8 +3425,7 @@
                    provided a default one will be created.
         :return: None.
         """
-        from bzrlib.fetch import RepoFetcher
-        f = RepoFetcher(to_repository=self.target,
+        f = _mod_fetch.RepoFetcher(to_repository=self.target,
                                from_repository=self.source,
                                last_revision=revision_id,
                                fetch_spec=fetch_spec,
@@ -3585,7 +3604,7 @@
                 self.target.texts.insert_record_stream(
                     self.source.texts.get_record_stream(
                         self.source.texts.keys(), 'topological', False))
-                pb.update('copying inventory', 0, 1)
+                pb.update('Copying inventory', 0, 1)
                 self.target.inventories.insert_record_stream(
                     self.source.inventories.get_record_stream(
                         self.source.inventories.keys(), 'topological', False))
@@ -3812,13 +3831,15 @@
                 basis_id, delta, current_revision_id, parents_parents)
             cache[current_revision_id] = parent_tree
 
-    def _fetch_batch(self, revision_ids, basis_id, cache):
+    def _fetch_batch(self, revision_ids, basis_id, cache, a_graph=None):
         """Fetch across a few revisions.
 
         :param revision_ids: The revisions to copy
         :param basis_id: The revision_id of a tree that must be in cache, used
             as a basis for delta when no other base is available
         :param cache: A cache of RevisionTrees that we can use.
+        :param a_graph: A Graph object to determine the heads() of the
+            rich-root data stream.
         :return: The revision_id of the last converted tree. The RevisionTree
             for it will be in cache
         """
@@ -3831,6 +3852,7 @@
         pending_revisions = []
         parent_map = self.source.get_parent_map(revision_ids)
         self._fetch_parent_invs_for_stacking(parent_map, cache)
+        self.source._safe_to_return_from_cache = True
         for tree in self.source.revision_trees(revision_ids):
             # Find a inventory delta for this revision.
             # Find text entries that need to be copied, too.
@@ -3884,14 +3906,14 @@
             pending_revisions.append(revision)
             cache[current_revision_id] = tree
             basis_id = current_revision_id
+        self.source._safe_to_return_from_cache = False
         # Copy file texts
         from_texts = self.source.texts
         to_texts = self.target.texts
         if root_keys_to_create:
-            from bzrlib.fetch import _new_root_data_stream
-            root_stream = _new_root_data_stream(
+            root_stream = _mod_fetch._new_root_data_stream(
                 root_keys_to_create, self._revision_id_to_root_id, parent_map,
-                self.source)
+                self.source, graph=a_graph)
             to_texts.insert_record_stream(root_stream)
         to_texts.insert_record_stream(from_texts.get_record_stream(
             text_keys, self.target._format._fetch_order,
@@ -3954,14 +3976,22 @@
         cache[basis_id] = basis_tree
         del basis_tree # We don't want to hang on to it here
         hints = []
+        if self._converting_to_rich_root and len(revision_ids) > 100:
+            a_graph = _mod_fetch._get_rich_root_heads_graph(self.source,
+                                                            revision_ids)
+        else:
+            a_graph = None
+
         for offset in range(0, len(revision_ids), batch_size):
             self.target.start_write_group()
             try:
                 pb.update('Transferring revisions', offset,
                           len(revision_ids))
                 batch = revision_ids[offset:offset+batch_size]
-                basis_id = self._fetch_batch(batch, basis_id, cache)
+                basis_id = self._fetch_batch(batch, basis_id, cache,
+                                             a_graph=a_graph)
             except:
+                self.source._safe_to_return_from_cache = False
                 self.target.abort_write_group()
                 raise
             else:
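
The surrounding loop is bzr's standard write-group protocol: each batch of revisions is staged inside a write group that is either committed or aborted as a unit, and the new except clause resets the cache-safety flag (set in _fetch_batch) before re-raising. Schematically, including the commit in the else: branch that the hunk cuts off (copy_one_batch is a hypothetical stand-in for the _fetch_batch call):

    for offset in range(0, len(revision_ids), batch_size):
        target.start_write_group()
        try:
            copy_one_batch(revision_ids[offset:offset + batch_size])
        except:
            source._safe_to_return_from_cache = False
            target.abort_write_group()
            raise
        else:
            target.commit_write_group()
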
@@ -3979,6 +4009,15 @@
         """See InterRepository.fetch()."""
         if fetch_spec is not None:
             raise AssertionError("Not implemented yet...")
+        # See <https://launchpad.net/bugs/456077> asking for a warning here
+        #
+        # nb this is only active for local-local fetches; other things using
+        # streaming.
+        trace.warning("Fetching between repositories with different formats\n"
+            "from %s to %s.\n"
+            "This may take some time. Upgrade the branches to the same format \n"
+            "for better results.\n"
+            % (self.source._format, self.target._format))
         if (not self.source.supports_rich_root()
             and self.target.supports_rich_root()):
             self._converting_to_rich_root = True
@@ -4078,13 +4117,13 @@
                                                   self.source_repo.is_shared())
         converted.lock_write()
         try:
-            self.step('Copying content into repository.')
+            self.step('Copying content')
             self.source_repo.copy_content_into(converted)
         finally:
             converted.unlock()
-        self.step('Deleting old repository content.')
+        self.step('Deleting old repository content')
         self.repo_dir.transport.delete_tree('repository.backup')
-        self.pb.note('repository converted')
+        ui.ui_factory.note('repository converted')
 
     def step(self, message):
         """Update the pb by a step."""
@@ -4315,6 +4354,13 @@
                 ):
                 if versioned_file is None:
                     continue
+                # TODO: key is often going to be a StaticTuple object
+                #       I don't believe we can define a method by which
+                #       (prefix,) + StaticTuple will work, though we could
+                #       define a StaticTuple.sq_concat that would allow you to
+                #       pass in either a tuple or a StaticTuple as the second
+                #       object, so instead we could have:
+                #       StaticTuple(prefix) + key here...
                 missing_keys.update((prefix,) + key for key in
                     versioned_file.get_missing_compression_parent_keys())
         except NotImplementedError:
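
The TODO block records a real limitation rather than dead code: bzrlib's StaticTuple is a memory-saving C type that does not subclass tuple, so tuple.__add__ rejects it and '(prefix,) + key' only works while the keys are plain tuples. The sq_concat mentioned is the C-level sequence-concatenation slot StaticTuple would have to implement for 'StaticTuple(prefix) + key' to accept either kind. Illustratively, assuming the C implementation is loaded (the pure-Python fallback is more forgiving):

    from bzrlib.static_tuple import StaticTuple
    key = StaticTuple('file-id', 'rev-id')
    ('prefix',) + key         # TypeError under the C implementation
    ('prefix',) + tuple(key)  # works, at the cost of an extra copy
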
@@ -4432,8 +4478,7 @@
         fetching the inventory weave.
         """
         if self._rich_root_upgrade():
-            import bzrlib.fetch
-            return bzrlib.fetch.Inter1and2Helper(
+            return _mod_fetch.Inter1and2Helper(
                 self.from_repository).generate_root_texts(revs)
         else:
             return []