/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

« back to all changes in this revision

Viewing changes to bzrlib/branch.py

  • Committer: Jonathan Lange
  • Date: 2009-12-09 09:20:42 UTC
  • mfrom: (4881 +trunk)
  • mto: This revision was merged to the branch mainline in revision 4907.
  • Revision ID: jml@canonical.com-20091209092042-s2zgqcf8f39yzxpj
Merge trunk.

Show diffs side-by-side

added added

removed removed

Lines of Context:
35
35
        symbol_versioning,
36
36
        transport,
37
37
        tsort,
 
38
        ui,
38
39
        urlutils,
39
40
        )
40
41
from bzrlib.config import BranchConfig, TransportConfig
45
46
    )
46
47
""")
47
48
 
48
 
from bzrlib.decorators import needs_read_lock, needs_write_lock
 
49
from bzrlib.decorators import needs_read_lock, needs_write_lock, only_raises
49
50
from bzrlib.hooks import HookPoint, Hooks
50
51
from bzrlib.inter import InterObject
 
52
from bzrlib.lock import _RelockDebugMixin
51
53
from bzrlib import registry
52
54
from bzrlib.symbol_versioning import (
53
55
    deprecated_in,
148
150
        if self._partial_revision_history_cache[-1] == _mod_revision.NULL_REVISION:
149
151
            self._partial_revision_history_cache.pop()
150
152
 
 
153
    def _get_check_refs(self):
 
154
        """Get the references needed for check().
 
155
 
 
156
        See bzrlib.check.
 
157
        """
 
158
        revid = self.last_revision()
 
159
        return [('revision-existence', revid), ('lefthand-distance', revid)]
 
160
 
151
161
    @staticmethod
152
162
    def open(base, _unsupported=False, possible_transports=None):
153
163
        """Open the branch rooted at base.
437
447
        # start_revision_id.
438
448
        if self._merge_sorted_revisions_cache is None:
439
449
            last_revision = self.last_revision()
440
 
            graph = self.repository.get_graph()
441
 
            parent_map = dict(((key, value) for key, value in
442
 
                     graph.iter_ancestry([last_revision]) if value is not None))
443
 
            revision_graph = repository._strip_NULL_ghosts(parent_map)
444
 
            revs = tsort.merge_sort(revision_graph, last_revision, None,
445
 
                generate_revno=True)
446
 
            # Drop the sequence # before caching
447
 
            self._merge_sorted_revisions_cache = [r[1:] for r in revs]
448
 
 
 
450
            last_key = (last_revision,)
 
451
            known_graph = self.repository.revisions.get_known_graph_ancestry(
 
452
                [last_key])
 
453
            self._merge_sorted_revisions_cache = known_graph.merge_sort(
 
454
                last_key)
449
455
        filtered = self._filter_merge_sorted_revisions(
450
456
            self._merge_sorted_revisions_cache, start_revision_id,
451
457
            stop_revision_id, stop_rule)
461
467
        """Iterate over an inclusive range of sorted revisions."""
462
468
        rev_iter = iter(merge_sorted_revisions)
463
469
        if start_revision_id is not None:
464
 
            for rev_id, depth, revno, end_of_merge in rev_iter:
 
470
            for node in rev_iter:
 
471
                rev_id = node.key[-1]
465
472
                if rev_id != start_revision_id:
466
473
                    continue
467
474
                else:
468
475
                    # The decision to include the start or not
469
476
                    # depends on the stop_rule if a stop is provided
470
 
                    rev_iter = chain(
471
 
                        iter([(rev_id, depth, revno, end_of_merge)]),
472
 
                        rev_iter)
 
477
                    # so pop this node back into the iterator
 
478
                    rev_iter = chain(iter([node]), rev_iter)
473
479
                    break
474
480
        if stop_revision_id is None:
475
 
            for rev_id, depth, revno, end_of_merge in rev_iter:
476
 
                yield rev_id, depth, revno, end_of_merge
 
481
            # Yield everything
 
482
            for node in rev_iter:
 
483
                rev_id = node.key[-1]
 
484
                yield (rev_id, node.merge_depth, node.revno,
 
485
                       node.end_of_merge)
477
486
        elif stop_rule == 'exclude':
478
 
            for rev_id, depth, revno, end_of_merge in rev_iter:
 
487
            for node in rev_iter:
 
488
                rev_id = node.key[-1]
479
489
                if rev_id == stop_revision_id:
480
490
                    return
481
 
                yield rev_id, depth, revno, end_of_merge
 
491
                yield (rev_id, node.merge_depth, node.revno,
 
492
                       node.end_of_merge)
482
493
        elif stop_rule == 'include':
483
 
            for rev_id, depth, revno, end_of_merge in rev_iter:
484
 
                yield rev_id, depth, revno, end_of_merge
 
494
            for node in rev_iter:
 
495
                rev_id = node.key[-1]
 
496
                yield (rev_id, node.merge_depth, node.revno,
 
497
                       node.end_of_merge)
485
498
                if rev_id == stop_revision_id:
486
499
                    return
487
500
        elif stop_rule == 'with-merges':
490
503
                left_parent = stop_rev.parent_ids[0]
491
504
            else:
492
505
                left_parent = _mod_revision.NULL_REVISION
493
 
            for rev_id, depth, revno, end_of_merge in rev_iter:
 
506
            # left_parent is the actual revision we want to stop logging at,
 
507
            # since we want to show the merged revisions after the stop_rev too
 
508
            reached_stop_revision_id = False
 
509
            revision_id_whitelist = []
 
510
            for node in rev_iter:
 
511
                rev_id = node.key[-1]
494
512
                if rev_id == left_parent:
 
513
                    # reached the left parent after the stop_revision
495
514
                    return
496
 
                yield rev_id, depth, revno, end_of_merge
 
515
                if (not reached_stop_revision_id or
 
516
                        rev_id in revision_id_whitelist):
 
517
                    yield (rev_id, node.merge_depth, node.revno,
 
518
                       node.end_of_merge)
 
519
                    if reached_stop_revision_id or rev_id == stop_revision_id:
 
520
                        # only do the merged revs of rev_id from now on
 
521
                        rev = self.repository.get_revision(rev_id)
 
522
                        if rev.parent_ids:
 
523
                            reached_stop_revision_id = True
 
524
                            revision_id_whitelist.extend(rev.parent_ids)
497
525
        else:
498
526
            raise ValueError('invalid stop_rule %r' % stop_rule)
499
527
 
662
690
        """
663
691
        if not self._format.supports_stacking():
664
692
            raise errors.UnstackableBranchFormat(self._format, self.base)
 
693
        # XXX: Changing from one fallback repository to another does not check
 
694
        # that all the data you need is present in the new fallback.
 
695
        # Possibly it should.
665
696
        self._check_stackable_repo()
666
697
        if not url:
667
698
            try:
669
700
            except (errors.NotStacked, errors.UnstackableBranchFormat,
670
701
                errors.UnstackableRepositoryFormat):
671
702
                return
672
 
            url = ''
673
 
            # XXX: Lock correctness - should unlock our old repo if we were
674
 
            # locked.
675
 
            # repositories don't offer an interface to remove fallback
676
 
            # repositories today; take the conceptually simpler option and just
677
 
            # reopen it.
678
 
            self.repository = self.bzrdir.find_repository()
679
 
            self.repository.lock_write()
680
 
            # for every revision reference the branch has, ensure it is pulled
681
 
            # in.
682
 
            source_repository = self._get_fallback_repository(old_url)
683
 
            for revision_id in chain([self.last_revision()],
684
 
                self.tags.get_reverse_tag_dict()):
685
 
                self.repository.fetch(source_repository, revision_id,
686
 
                    find_ghosts=True)
 
703
            self._unstack()
687
704
        else:
688
705
            self._activate_fallback_location(url)
689
706
        # write this out after the repository is stacked to avoid setting a
690
707
        # stacked config that doesn't work.
691
708
        self._set_config_location('stacked_on_location', url)
692
709
 
 
710
    def _unstack(self):
 
711
        """Change a branch to be unstacked, copying data as needed.
 
712
        
 
713
        Don't call this directly, use set_stacked_on_url(None).
 
714
        """
 
715
        pb = ui.ui_factory.nested_progress_bar()
 
716
        try:
 
717
            pb.update("Unstacking")
 
718
            # The basic approach here is to fetch the tip of the branch,
 
719
            # including all available ghosts, from the existing stacked
 
720
            # repository into a new repository object without the fallbacks. 
 
721
            #
 
722
            # XXX: See <https://launchpad.net/bugs/397286> - this may not be
 
723
            # correct for CHKMap repositories
 
724
            old_repository = self.repository
 
725
            if len(old_repository._fallback_repositories) != 1:
 
726
                raise AssertionError("can't cope with fallback repositories "
 
727
                    "of %r" % (self.repository,))
 
728
            # unlock it, including unlocking the fallback
 
729
            old_repository.unlock()
 
730
            old_repository.lock_read()
 
731
            try:
 
732
                # Repositories don't offer an interface to remove fallback
 
733
                # repositories today; take the conceptually simpler option and just
 
734
                # reopen it.  We reopen it starting from the URL so that we
 
735
                # get a separate connection for RemoteRepositories and can
 
736
                # stream from one of them to the other.  This does mean doing
 
737
                # separate SSH connection setup, but unstacking is not a
 
738
                # common operation so it's tolerable.
 
739
                new_bzrdir = bzrdir.BzrDir.open(self.bzrdir.root_transport.base)
 
740
                new_repository = new_bzrdir.find_repository()
 
741
                self.repository = new_repository
 
742
                if self.repository._fallback_repositories:
 
743
                    raise AssertionError("didn't expect %r to have "
 
744
                        "fallback_repositories"
 
745
                        % (self.repository,))
 
746
                # this is not paired with an unlock because it's just restoring
 
747
                # the previous state; the lock's released when set_stacked_on_url
 
748
                # returns
 
749
                self.repository.lock_write()
 
750
                # XXX: If you unstack a branch while it has a working tree
 
751
                # with a pending merge, the pending-merged revisions will no
 
752
                # longer be present.  You can (probably) revert and remerge.
 
753
                #
 
754
                # XXX: This only fetches up to the tip of the repository; it
 
755
                # doesn't bring across any tags.  That's fairly consistent
 
756
                # with how branch works, but perhaps not ideal.
 
757
                self.repository.fetch(old_repository,
 
758
                    revision_id=self.last_revision(),
 
759
                    find_ghosts=True)
 
760
            finally:
 
761
                old_repository.unlock()
 
762
        finally:
 
763
            pb.finished()
693
764
 
694
765
    def _set_tags_bytes(self, bytes):
695
766
        """Mirror method for _get_tags_bytes.
1095
1166
        revision_id: if not None, the revision history in the new branch will
1096
1167
                     be truncated to end with revision_id.
1097
1168
        """
 
1169
        if (repository_policy is not None and
 
1170
            repository_policy.requires_stacking()):
 
1171
            to_bzrdir._format.require_stacking(_skip_repo=True)
1098
1172
        result = to_bzrdir.create_branch()
1099
1173
        result.lock_write()
1100
1174
        try:
1168
1242
        target._set_all_reference_info(target_reference_dict)
1169
1243
 
1170
1244
    @needs_read_lock
1171
 
    def check(self):
 
1245
    def check(self, refs):
1172
1246
        """Check consistency of the branch.
1173
1247
 
1174
1248
        In particular this checks that revisions given in the revision-history
1177
1251
 
1178
1252
        Callers will typically also want to check the repository.
1179
1253
 
 
1254
        :param refs: Calculated refs for this branch as specified by
 
1255
            branch._get_check_refs()
1180
1256
        :return: A BranchCheckResult.
1181
1257
        """
1182
 
        ret = BranchCheckResult(self)
1183
 
        mainline_parent_id = None
 
1258
        result = BranchCheckResult(self)
1184
1259
        last_revno, last_revision_id = self.last_revision_info()
1185
 
        real_rev_history = []
1186
 
        try:
1187
 
            for revid in self.repository.iter_reverse_revision_history(
1188
 
                last_revision_id):
1189
 
                real_rev_history.append(revid)
1190
 
        except errors.RevisionNotPresent:
1191
 
            ret.ghosts_in_mainline = True
1192
 
        else:
1193
 
            ret.ghosts_in_mainline = False
1194
 
        real_rev_history.reverse()
1195
 
        if len(real_rev_history) != last_revno:
1196
 
            raise errors.BzrCheckError('revno does not match len(mainline)'
1197
 
                ' %s != %s' % (last_revno, len(real_rev_history)))
1198
 
        # TODO: We should probably also check that real_rev_history actually
1199
 
        #       matches self.revision_history()
1200
 
        for revision_id in real_rev_history:
1201
 
            try:
1202
 
                revision = self.repository.get_revision(revision_id)
1203
 
            except errors.NoSuchRevision, e:
1204
 
                raise errors.BzrCheckError("mainline revision {%s} not in repository"
1205
 
                            % revision_id)
1206
 
            # In general the first entry on the revision history has no parents.
1207
 
            # But it's not illegal for it to have parents listed; this can happen
1208
 
            # in imports from Arch when the parents weren't reachable.
1209
 
            if mainline_parent_id is not None:
1210
 
                if mainline_parent_id not in revision.parent_ids:
1211
 
                    raise errors.BzrCheckError("previous revision {%s} not listed among "
1212
 
                                        "parents of {%s}"
1213
 
                                        % (mainline_parent_id, revision_id))
1214
 
            mainline_parent_id = revision_id
1215
 
        return ret
 
1260
        actual_revno = refs[('lefthand-distance', last_revision_id)]
 
1261
        if actual_revno != last_revno:
 
1262
            result.errors.append(errors.BzrCheckError(
 
1263
                'revno does not match len(mainline) %s != %s' % (
 
1264
                last_revno, actual_revno)))
 
1265
        # TODO: We should probably also check that self.revision_history
 
1266
        # matches the repository for older branch formats.
 
1267
        # If looking for the code that cross-checks repository parents against
 
1268
        # the iter_reverse_revision_history output, that is now a repository
 
1269
        # specific check.
 
1270
        return result
1216
1271
 
1217
1272
    def _get_checkout_format(self):
1218
1273
        """Return the most suitable metadir for a checkout of this branch.
1243
1298
        # clone call. Or something. 20090224 RBC/spiv.
1244
1299
        if revision_id is None:
1245
1300
            revision_id = self.last_revision()
1246
 
        try:
1247
 
            dir_to = self.bzrdir.clone_on_transport(to_transport,
1248
 
                revision_id=revision_id, stacked_on=stacked_on,
1249
 
                create_prefix=create_prefix, use_existing_dir=use_existing_dir)
1250
 
        except errors.FileExists:
1251
 
            if not use_existing_dir:
1252
 
                raise
1253
 
        except errors.NoSuchFile:
1254
 
            if not create_prefix:
1255
 
                raise
 
1301
        dir_to = self.bzrdir.clone_on_transport(to_transport,
 
1302
            revision_id=revision_id, stacked_on=stacked_on,
 
1303
            create_prefix=create_prefix, use_existing_dir=use_existing_dir)
1256
1304
        return dir_to.open_branch()
1257
1305
 
1258
1306
    def create_checkout(self, to_location, revision_id=None,
1398
1446
        """Return the format for the branch object in a_bzrdir."""
1399
1447
        try:
1400
1448
            transport = a_bzrdir.get_branch_transport(None)
1401
 
            format_string = transport.get("format").read()
 
1449
            format_string = transport.get_bytes("format")
1402
1450
            return klass._formats[format_string]
1403
1451
        except errors.NoSuchFile:
1404
1452
            raise errors.NotBranchError(path=transport.base)
1937
1985
    def get_reference(self, a_bzrdir):
1938
1986
        """See BranchFormat.get_reference()."""
1939
1987
        transport = a_bzrdir.get_branch_transport(None)
1940
 
        return transport.get('location').read()
 
1988
        return transport.get_bytes('location')
1941
1989
 
1942
1990
    def set_reference(self, a_bzrdir, to_branch):
1943
1991
        """See BranchFormat.set_reference()."""
2031
2079
BranchFormat.register_format(__format6)
2032
2080
BranchFormat.register_format(__format7)
2033
2081
BranchFormat.register_format(__format8)
2034
 
BranchFormat.set_default_format(__format6)
 
2082
BranchFormat.set_default_format(__format7)
2035
2083
_legacy_formats = [BzrBranchFormat4(),
2036
2084
    ]
2037
2085
network_format_registry.register(
2038
2086
    _legacy_formats[0].network_name(), _legacy_formats[0].__class__)
2039
2087
 
2040
2088
 
2041
 
class BzrBranch(Branch):
 
2089
class BzrBranch(Branch, _RelockDebugMixin):
2042
2090
    """A branch stored in the actual filesystem.
2043
2091
 
2044
2092
    Note that it's "local" in the context of the filesystem; it doesn't
2090
2138
        return self.control_files.is_locked()
2091
2139
 
2092
2140
    def lock_write(self, token=None):
 
2141
        if not self.is_locked():
 
2142
            self._note_lock('w')
2093
2143
        # All-in-one needs to always unlock/lock.
2094
2144
        repo_control = getattr(self.repository, 'control_files', None)
2095
2145
        if self.control_files == repo_control or not self.is_locked():
2105
2155
            raise
2106
2156
 
2107
2157
    def lock_read(self):
 
2158
        if not self.is_locked():
 
2159
            self._note_lock('r')
2108
2160
        # All-in-one needs to always unlock/lock.
2109
2161
        repo_control = getattr(self.repository, 'control_files', None)
2110
2162
        if self.control_files == repo_control or not self.is_locked():
2119
2171
                self.repository.unlock()
2120
2172
            raise
2121
2173
 
 
2174
    @only_raises(errors.LockNotHeld, errors.LockBroken)
2122
2175
    def unlock(self):
2123
2176
        try:
2124
2177
            self.control_files.unlock()
2795
2848
 
2796
2849
    def __init__(self, branch):
2797
2850
        self.branch = branch
2798
 
        self.ghosts_in_mainline = False
 
2851
        self.errors = []
2799
2852
 
2800
2853
    def report_results(self, verbose):
2801
2854
        """Report the check results via trace.note.
2803
2856
        :param verbose: Requests more detailed display of what was checked,
2804
2857
            if any.
2805
2858
        """
2806
 
        note('checked branch %s format %s',
2807
 
             self.branch.base,
2808
 
             self.branch._format)
2809
 
        if self.ghosts_in_mainline:
2810
 
            note('branch contains ghosts in mainline')
 
2859
        note('checked branch %s format %s', self.branch.base,
 
2860
            self.branch._format)
 
2861
        for error in self.errors:
 
2862
            note('found error:%s', error)
2811
2863
 
2812
2864
 
2813
2865
class Converter5to6(object):