/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

Viewing changes to bzrlib/workingtree.py

Merge from bzr.dev

=== modified file 'bzrlib/workingtree.py'
--- bzrlib/workingtree.py
+++ bzrlib/workingtree.py
@@ -1 +1 @@
-# Copyright (C) 2005, 2006 Canonical Ltd
+# Copyright (C) 2005, 2006, 2007 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -41 +41 @@
 
 from bzrlib.lazy_import import lazy_import
 lazy_import(globals(), """
+from bisect import bisect_left
 import collections
 from copy import deepcopy
 import errno
+import itertools
+import operator
 import stat
 from time import time
 import warnings
+import re
 
 import bzrlib
 from bzrlib import (
+    branch,
     bzrdir,
     conflicts as _mod_conflicts,
+    dirstate,
     errors,
     generate_ids,
     globbing,
@@ -59 +65 @@
     ignores,
     merge,
     osutils,
+    revisiontree,
+    repository,
     textui,
     transform,
     urlutils,
     xml5,
     xml6,
+    xml7,
     )
 import bzrlib.branch
 from bzrlib.transport import get_transport
 import bzrlib.ui
+from bzrlib.workingtree_4 import WorkingTreeFormat4
 """)
 
 from bzrlib import symbol_versioning
 from bzrlib.decorators import needs_read_lock, needs_write_lock
-from bzrlib.inventory import InventoryEntry, Inventory, ROOT_ID
+from bzrlib.inventory import InventoryEntry, Inventory, ROOT_ID, TreeReference
 from bzrlib.lockable_files import LockableFiles, TransportLock
 from bzrlib.lockdir import LockDir
 import bzrlib.mutabletree
@@ -91 +101 @@
     )
 from bzrlib.trace import mutter, note
 from bzrlib.transport.local import LocalTransport
-import bzrlib.tree
 from bzrlib.progress import DummyProgress, ProgressPhase
 from bzrlib.revision import NULL_REVISION, CURRENT_REVISION
-import bzrlib.revisiontree
 from bzrlib.rio import RioReader, rio_file, Stanza
 from bzrlib.symbol_versioning import (deprecated_passed,
         deprecated_method,
@@ -297 +305 @@
         self._control_files.break_lock()
         self.branch.break_lock()
 
+    def requires_rich_root(self):
+        return self._format.requires_rich_root
+
+    def supports_tree_reference(self):
+        return False
+
     def _set_inventory(self, inv, dirty):
         """Set the internal cached inventory.
 
@@ -348 +362 @@
         """
         return WorkingTree.open(path, _unsupported=True)
 
+    # should be deprecated - this is slow and in any case treating them as a
+    # container is (we now know) bad style -- mbp 20070302
+    ## @deprecated_method(zero_fifteen)
     def __iter__(self):
         """Iterate through file_ids for this tree.
 
@@ -379 +396 @@
             # in the future this should return the tree for
             # 'empty:' - the implicit root empty tree.
             return self.branch.repository.revision_tree(None)
-        else:
-            try:
-                xml = self.read_basis_inventory()
-                inv = xml6.serializer_v6.read_inventory_from_string(xml)
-                if inv is not None and inv.revision_id == revision_id:
-                    return bzrlib.revisiontree.RevisionTree(
-                        self.branch.repository, inv, revision_id)
-            except (errors.NoSuchFile, errors.BadInventoryFormat):
-                pass
+        try:
+            return self.revision_tree(revision_id)
+        except errors.NoSuchRevision:
+            pass
         # No cached copy available, retrieve from the repository.
         # FIXME? RBC 20060403 should we cache the inventory locally
         # at this point ?
@@ -452 +464 @@
         return osutils.lexists(self.abspath(filename))
 
     def get_file(self, file_id):
+        file_id = osutils.safe_file_id(file_id)
         return self.get_file_byname(self.id2path(file_id))
 
     def get_file_text(self, file_id):
+        file_id = osutils.safe_file_id(file_id)
         return self.get_file(file_id).read()
 
     def get_file_byname(self, filename):
         return file(self.abspath(filename), 'rb')
 
+    @needs_read_lock
     def annotate_iter(self, file_id):
         """See Tree.annotate_iter
 
@@ -470 +485 @@
         incorrectly attributed to CURRENT_REVISION (but after committing, the
         attribution will be correct).
         """
+        file_id = osutils.safe_file_id(file_id)
         basis = self.basis_tree()
-        changes = self._iter_changes(basis, True, [file_id]).next()
-        changed_content, kind = changes[2], changes[6]
-        if not changed_content:
-            return basis.annotate_iter(file_id)
-        if kind[1] is None:
-            return None
-        import annotate
-        if kind[0] != 'file':
-            old_lines = []
-        else:
-            old_lines = list(basis.annotate_iter(file_id))
-        old = [old_lines]
-        for tree in self.branch.repository.revision_trees(
-            self.get_parent_ids()[1:]):
-            if file_id not in tree:
-                continue
-            old.append(list(tree.annotate_iter(file_id)))
-        return annotate.reannotate(old, self.get_file(file_id).readlines(),
-                                   CURRENT_REVISION)
+        basis.lock_read()
+        try:
+            changes = self._iter_changes(basis, True, [self.id2path(file_id)],
+                require_versioned=True).next()
+            changed_content, kind = changes[2], changes[6]
+            if not changed_content:
+                return basis.annotate_iter(file_id)
+            if kind[1] is None:
+                return None
+            import annotate
+            if kind[0] != 'file':
+                old_lines = []
+            else:
+                old_lines = list(basis.annotate_iter(file_id))
+            old = [old_lines]
+            for tree in self.branch.repository.revision_trees(
+                self.get_parent_ids()[1:]):
+                if file_id not in tree:
+                    continue
+                old.append(list(tree.annotate_iter(file_id)))
+            return annotate.reannotate(old, self.get_file(file_id).readlines(),
+                                       CURRENT_REVISION)
+        finally:
+            basis.unlock()
 
     def get_parent_ids(self):
         """See Tree.get_parent_ids.
@@ -503 +524 @@
         else:
             parents = [last_rev]
         try:
-            merges_file = self._control_files.get_utf8('pending-merges')
+            merges_file = self._control_files.get('pending-merges')
         except errors.NoSuchFile:
             pass
         else:
             for l in merges_file.readlines():
-                parents.append(l.rstrip('\n'))
+                revision_id = osutils.safe_revision_id(l.rstrip('\n'))
+                parents.append(revision_id)
         return parents
 
     @needs_read_lock
@@ -518 +540 @@
 
     def _get_store_filename(self, file_id):
         ## XXX: badly named; this is not in the store at all
+        file_id = osutils.safe_file_id(file_id)
         return self.abspath(self.id2path(file_id))
 
     @needs_read_lock
@@ -556 +579 @@
             tree.set_parent_ids([revision_id])
 
     def id2abspath(self, file_id):
+        file_id = osutils.safe_file_id(file_id)
         return self.abspath(self.id2path(file_id))
 
     def has_id(self, file_id):
         # files that have been deleted are excluded
-        inv = self._inventory
+        file_id = osutils.safe_file_id(file_id)
+        inv = self.inventory
         if not inv.has_id(file_id):
             return False
         path = inv.id2path(file_id)
         return osutils.lexists(self.abspath(path))
 
     def has_or_had_id(self, file_id):
+        file_id = osutils.safe_file_id(file_id)
         if file_id == self.inventory.root.file_id:
             return True
         return self.inventory.has_id(file_id)
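
Most accessors in the hunks above now normalise their file_id argument through osutils.safe_file_id before touching the inventory. Roughly (a sketch, not the exact implementation), unicode ids are encoded to UTF-8 byte strings and plain byte strings pass through unchanged:

    from bzrlib import osutils

    # Unicode file ids are accepted and converted to utf-8 str objects,
    # so the rest of the tree code only ever sees byte-string ids.
    file_id = osutils.safe_file_id(u'hello-20070101-abcdef')  # hypothetical id
    assert isinstance(file_id, str)
    assert osutils.safe_file_id('hello-20070101-abcdef') == 'hello-20070101-abcdef'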
@@ -574 +600 @@
     __contains__ = has_id
 
     def get_file_size(self, file_id):
+        file_id = osutils.safe_file_id(file_id)
         return os.path.getsize(self.id2abspath(file_id))
 
     @needs_read_lock
     def get_file_sha1(self, file_id, path=None, stat_value=None):
+        file_id = osutils.safe_file_id(file_id)
         if not path:
             path = self._inventory.id2path(file_id)
         return self._hashcache.get_sha1(path, stat_value)
 
     def get_file_mtime(self, file_id, path=None):
+        file_id = osutils.safe_file_id(file_id)
         if not path:
-            path = self._inventory.id2path(file_id)
+            path = self.inventory.id2path(file_id)
         return os.lstat(self.abspath(path)).st_mtime
 
     if not supports_executable():
         def is_executable(self, file_id, path=None):
+            file_id = osutils.safe_file_id(file_id)
             return self._inventory[file_id].executable
     else:
         def is_executable(self, file_id, path=None):
             if not path:
-                path = self._inventory.id2path(file_id)
+                file_id = osutils.safe_file_id(file_id)
+                path = self.id2path(file_id)
             mode = os.lstat(self.abspath(path)).st_mode
             return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
 
-    @needs_write_lock
+    @needs_tree_write_lock
     def _add(self, files, ids, kinds):
         """See MutableTree._add."""
         # TODO: Re-adding a file that is removed in the working copy
@@ -610 +641 @@
             if file_id is None:
                 inv.add_path(f, kind=kind)
             else:
+                file_id = osutils.safe_file_id(file_id)
                 inv.add_path(f, kind=kind, file_id=file_id)
         self._write_inventory(inv)
 
@@ -704 +736 @@
 
     def _set_merges_from_parent_ids(self, parent_ids):
         merges = parent_ids[1:]
-        self._control_files.put_utf8('pending-merges', '\n'.join(merges))
+        self._control_files.put_bytes('pending-merges', '\n'.join(merges))
 
     @needs_tree_write_lock
     def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
@@ -719 +751 @@
         :param revision_ids: The revision_ids to set as the parent ids of this
             working tree. Any of these may be ghosts.
         """
+        revision_ids = [osutils.safe_revision_id(r) for r in revision_ids]
         self._check_parents_for_ghosts(revision_ids,
             allow_leftmost_as_ghost=allow_leftmost_as_ghost)
 
@@ -732 +765 @@
     @needs_tree_write_lock
     def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
         """See MutableTree.set_parent_trees."""
-        parent_ids = [rev for (rev, tree) in parents_list]
+        parent_ids = [osutils.safe_revision_id(rev) for (rev, tree) in parents_list]
 
         self._check_parents_for_ghosts(parent_ids,
             allow_leftmost_as_ghost=allow_leftmost_as_ghost)
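
set_parent_ids and set_parent_trees above now pass every incoming revision id through osutils.safe_revision_id as well, so unicode ids from callers are tolerated. A minimal sketch (the revision ids are hypothetical; the tree must be write-locked):

    wt.lock_write()
    try:
        # The leftmost id becomes the new basis; the rest become pending merges.
        wt.set_parent_ids([u'rev-a-id', u'rev-b-id'],
                          allow_leftmost_as_ghost=True)
        print wt.get_parent_ids()
    finally:
        wt.unlock()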
@@ -766 +799 @@
     def set_merge_modified(self, modified_hashes):
         def iter_stanzas():
             for file_id, hash in modified_hashes.iteritems():
-                yield Stanza(file_id=file_id, hash=hash)
+                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
         self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
 
-    @needs_tree_write_lock
     def _put_rio(self, filename, stanzas, header):
+        self._must_be_locked()
         my_file = rio_file(stanzas, header)
         self._control_files.put(filename, my_file)
 
@@ -795 +828 @@
             merger.check_basis(check_clean=True, require_commits=False)
             if to_revision is None:
                 to_revision = branch.last_revision()
+            else:
+                to_revision = osutils.safe_revision_id(to_revision)
             merger.other_rev_id = to_revision
             if merger.other_rev_id is None:
                 raise error.NoCommits(branch)
@@ -802 +837 @@
             merger.other_basis = merger.other_rev_id
             merger.other_tree = self.branch.repository.revision_tree(
                 merger.other_rev_id)
+            merger.other_branch = branch
             merger.pp.next_phase()
             merger.find_base()
             if merger.base_rev_id == merger.other_rev_id:
@@ -819 +855 @@
 
     @needs_read_lock
     def merge_modified(self):
+        """Return a dictionary of files modified by a merge.
+
+        The list is initialized by WorkingTree.set_merge_modified, which is
+        typically called after we make some automatic updates to the tree
+        because of a merge.
+
+        This returns a map of file_id->sha1, containing only files which are
+        still in the working inventory and have that text hash.
+        """
         try:
             hashfile = self._control_files.get('merge-hashes')
         except errors.NoSuchFile:
@@ -830 +875 @@
         except StopIteration:
             raise errors.MergeModifiedFormatError()
         for s in RioReader(hashfile):
-            file_id = s.get("file_id")
+            # RioReader reads in Unicode, so convert file_ids back to utf8
+            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
             if file_id not in self.inventory:
                 continue
-            hash = s.get("hash")
-            if hash == self.get_file_sha1(file_id):
-                merge_hashes[file_id] = hash
+            text_hash = s.get("hash")
+            if text_hash == self.get_file_sha1(file_id):
+                merge_hashes[file_id] = text_hash
         return merge_hashes
 
     @needs_write_lock
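
The newly documented merge_modified above returns the map of file_id -> sha1 recorded by set_merge_modified after a merge, restricted to files that still exist with exactly that text. A read-side sketch (wt is assumed to be a WorkingTree):

    wt.lock_read()
    try:
        for file_id, sha1 in wt.merge_modified().items():
            print wt.id2path(file_id), sha1
    finally:
        wt.unlock()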
@@ -848 +894 @@
         return file_id
 
     def get_symlink_target(self, file_id):
+        file_id = osutils.safe_file_id(file_id)
         return os.readlink(self.id2abspath(file_id))
 
-    def file_class(self, filename):
-        if self.path2id(filename):
-            return 'V'
-        elif self.is_ignored(filename):
-            return 'I'
-        else:
-            return '?'
+    @needs_write_lock
+    def subsume(self, other_tree):
+        def add_children(inventory, entry):
+            for child_entry in entry.children.values():
+                inventory._byid[child_entry.file_id] = child_entry
+                if child_entry.kind == 'directory':
+                    add_children(inventory, child_entry)
+        if other_tree.get_root_id() == self.get_root_id():
+            raise errors.BadSubsumeSource(self, other_tree,
+                                          'Trees have the same root')
+        try:
+            other_tree_path = self.relpath(other_tree.basedir)
+        except errors.PathNotChild:
+            raise errors.BadSubsumeSource(self, other_tree,
+                'Tree is not contained by the other')
+        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
+        if new_root_parent is None:
+            raise errors.BadSubsumeSource(self, other_tree,
+                'Parent directory is not versioned.')
+        # We need to ensure that the result of a fetch will have a
+        # versionedfile for the other_tree root, and only fetching into
+        # RepositoryKnit2 guarantees that.
+        if not self.branch.repository.supports_rich_root():
+            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
+        other_tree.lock_tree_write()
+        try:
+            new_parents = other_tree.get_parent_ids()
+            other_root = other_tree.inventory.root
+            other_root.parent_id = new_root_parent
+            other_root.name = osutils.basename(other_tree_path)
+            self.inventory.add(other_root)
+            add_children(self.inventory, other_root)
+            self._write_inventory(self.inventory)
+            # normally we don't want to fetch whole repositories, but i think
+            # here we really do want to consolidate the whole thing.
+            for parent_id in other_tree.get_parent_ids():
+                self.branch.fetch(other_tree.branch, parent_id)
+                self.add_parent_tree_id(parent_id)
+        finally:
+            other_tree.unlock()
+        other_tree.bzrdir.retire_bzrdir()
+
+    @needs_tree_write_lock
+    def extract(self, file_id, format=None):
+        """Extract a subtree from this tree.
+
+        A new branch will be created, relative to the path for this tree.
+        """
+        def mkdirs(path):
+            segments = osutils.splitpath(path)
+            transport = self.branch.bzrdir.root_transport
+            for name in segments:
+                transport = transport.clone(name)
+                try:
+                    transport.mkdir('.')
+                except errors.FileExists:
+                    pass
+            return transport
+
+        sub_path = self.id2path(file_id)
+        branch_transport = mkdirs(sub_path)
+        if format is None:
+            format = bzrdir.format_registry.make_bzrdir('dirstate-with-subtree')
+        try:
+            branch_transport.mkdir('.')
+        except errors.FileExists:
+            pass
+        branch_bzrdir = format.initialize_on_transport(branch_transport)
+        try:
+            repo = branch_bzrdir.find_repository()
+        except errors.NoRepositoryPresent:
+            repo = branch_bzrdir.create_repository()
+            assert repo.supports_rich_root()
+        else:
+            if not repo.supports_rich_root():
+                raise errors.RootNotRich()
+        new_branch = branch_bzrdir.create_branch()
+        new_branch.pull(self.branch)
+        for parent_id in self.get_parent_ids():
+            new_branch.fetch(self.branch, parent_id)
+        tree_transport = self.bzrdir.root_transport.clone(sub_path)
+        if tree_transport.base != branch_transport.base:
+            tree_bzrdir = format.initialize_on_transport(tree_transport)
+            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
+        else:
+            tree_bzrdir = branch_bzrdir
+        wt = tree_bzrdir.create_workingtree(NULL_REVISION)
+        wt.set_parent_ids(self.get_parent_ids())
+        my_inv = self.inventory
+        child_inv = Inventory(root_id=None)
+        new_root = my_inv[file_id]
+        my_inv.remove_recursive_id(file_id)
+        new_root.parent_id = None
+        child_inv.add(new_root)
+        self._write_inventory(my_inv)
+        wt._write_inventory(child_inv)
+        return wt
+
+    def _serialize(self, inventory, out_file):
+        xml5.serializer_v5.write_inventory(self._inventory, out_file)
+
+    def _deserialize(selt, in_file):
+        return xml5.serializer_v5.read_inventory(in_file)
 
     def flush(self):
         """Write the in memory inventory to disk."""
@@ -864 +1007 @@
         if self._control_files._lock_mode != 'w':
             raise errors.NotWriteLocked(self)
         sio = StringIO()
-        xml5.serializer_v5.write_inventory(self._inventory, sio)
+        self._serialize(self._inventory, sio)
         sio.seek(0)
         self._control_files.put('inventory', sio)
         self._inventory_is_modified = False
@@ -879 +1022 @@
 
         Skips the control directory.
         """
-        inv = self._inventory
+        # list_files is an iterator, so @needs_read_lock doesn't work properly
+        # with it. So callers should be careful to always read_lock the tree.
+        if not self.is_locked():
+            raise errors.ObjectNotLocked(self)
+
+        inv = self.inventory
         if include_root is True:
             yield ('', 'V', 'directory', inv.root.file_id, inv.root)
         # Convert these into local objects to save lookup times
@@ -1267 +1415 @@
         These are files in the working directory that are not versioned or
         control files or ignored.
         """
-        for subp in self.extras():
-            if not self.is_ignored(subp):
-                yield subp
+        # force the extras method to be fully executed before returning, to
+        # prevent race conditions with the lock
+        return iter(
+            [subp for subp in self.extras() if not self.is_ignored(subp)])
 
     @needs_tree_write_lock
     def unversion(self, file_ids):
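
list_files now refuses to run on an unlocked tree, and unknowns (above) fully evaluates extras() before returning, so the result stays valid after the lock is released. A sketch:

    wt.lock_read()
    try:
        # Fully materialised list of unversioned, non-ignored paths.
        unknown_paths = list(wt.unknowns())
    finally:
        wt.unlock()
    for path in unknown_paths:
        print path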
@@ -1282 +1431 @@
         :raises: NoSuchId if any fileid is not currently versioned.
         """
         for file_id in file_ids:
+            file_id = osutils.safe_file_id(file_id)
             if self._inventory.has_id(file_id):
                 self._inventory.remove_recursive_id(file_id)
             else:
@@ -1330 +1480 @@
                 pp.next_phase()
                 repository = self.branch.repository
                 pb = bzrlib.ui.ui_factory.nested_progress_bar()
+                basis_tree.lock_read()
                 try:
                     new_basis_tree = self.branch.basis_tree()
                     merge.merge_inner(
@@ -1344 +1495 @@
                         self.set_root_id(new_basis_tree.inventory.root.file_id)
                 finally:
                     pb.finished()
+                    basis_tree.unlock()
                 # TODO - dedup parents list with things merged by pull ?
                 # reuse the revisiontree we merged against to set the new
                 # tree data.
@@ -1351 +1503 @@
                 # we have to pull the merge trees out again, because
                 # merge_inner has set the ids. - this corner is not yet
                 # layered well enough to prevent double handling.
+                # XXX TODO: Fix the double handling: telling the tree about
+                # the already known parent data is wasteful.
                 merges = self.get_parent_ids()[1:]
                 parent_trees.extend([
                     (parent, repository.revision_tree(parent)) for
@@ -1364 +1518 @@
     @needs_write_lock
     def put_file_bytes_non_atomic(self, file_id, bytes):
         """See MutableTree.put_file_bytes_non_atomic."""
+        file_id = osutils.safe_file_id(file_id)
         stream = file(self.id2abspath(file_id), 'wb')
         try:
             stream.write(bytes)
@@ -1372 +1527 @@
         # TODO: update the hashcache here ?
 
     def extras(self):
-        """Yield all unknown files in this WorkingTree.
+        """Yield all unversioned files in this WorkingTree.
 
-        If there are any unknown directories then only the directory is
-        returned, not all its children.  But if there are unknown files
+        If there are any unversioned directories then only the directory is
+        returned, not all its children.  But if there are unversioned files
         under a versioned subdirectory, they are returned.
 
         Currently returned depth-first, sorted by name within directories.
+        This is the same order used by 'osutils.walkdirs'.
         """
         ## TODO: Work from given directory downwards
         for path, dir_entry in self.inventory.directories():
@@ -1405 +1561 @@
                 subp = pathjoin(path, subf)
                 yield subp
 
-
     def ignored_files(self):
         """Yield list of PATH, IGNORE_PATTERN"""
         for subp in self.extras():
@@ -1496 +1651 @@
     def is_locked(self):
         return self._control_files.is_locked()
 
+    def _must_be_locked(self):
+        if not self.is_locked():
+            raise errors.ObjectNotLocked(self)
+
     def lock_read(self):
         """See Branch.lock_read, and WorkingTree.unlock."""
+        if not self.is_locked():
+            self._reset_data()
         self.branch.lock_read()
         try:
             return self._control_files.lock_read()
@@ -1507 +1668 @@
 
     def lock_tree_write(self):
         """See MutableTree.lock_tree_write, and WorkingTree.unlock."""
+        if not self.is_locked():
+            self._reset_data()
         self.branch.lock_read()
         try:
             return self._control_files.lock_write()
@@ -1516 +1679 @@
 
     def lock_write(self):
         """See MutableTree.lock_write, and WorkingTree.unlock."""
+        if not self.is_locked():
+            self._reset_data()
         self.branch.lock_write()
         try:
             return self._control_files.lock_write()
@@ -1529 +1694 @@
     def _basis_inventory_name(self):
         return 'basis-inventory-cache'
 
+    def _reset_data(self):
+        """Reset transient data that cannot be revalidated."""
+        self._inventory_is_modified = False
+        result = self._deserialize(self._control_files.get('inventory'))
+        self._set_inventory(result, dirty=False)
+
     @needs_tree_write_lock
     def set_last_revision(self, new_revision):
         """Change the last revision in the working tree."""
+        new_revision = osutils.safe_revision_id(new_revision)
         if self._change_last_revision(new_revision):
             self._cache_basis_inventory(new_revision)
 
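
lock_read, lock_tree_write and lock_write above now call _reset_data() when taking the first lock, so cached inventory state is re-read from disk rather than trusted across lock boundaries. The usual caller pattern is unchanged (a sketch):

    wt.lock_read()
    try:
        inv = wt.inventory   # freshly deserialized on first lock
    finally:
        wt.unlock()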
@@ -1560 +1732 @@
 
     def _create_basis_xml_from_inventory(self, revision_id, inventory):
         """Create the text that will be saved in basis-inventory"""
-        inventory.revision_id = revision_id
-        return xml6.serializer_v6.write_inventory_to_string(inventory)
+        # TODO: jam 20070209 This should be redundant, as the revision_id
+        #       as all callers should have already converted the revision_id to
+        #       utf8
+        inventory.revision_id = osutils.safe_revision_id(revision_id)
+        return xml7.serializer_v7.write_inventory_to_string(inventory)
 
     def _cache_basis_inventory(self, new_revision):
         """Cache new_revision as the basis inventory."""
@@ -1582 +1757 @@
             xml = self.branch.repository.get_inventory_xml(new_revision)
             firstline = xml.split('\n', 1)[0]
             if (not 'revision_id="' in firstline or
-                'format="6"' not in firstline):
+                'format="7"' not in firstline):
                 inv = self.branch.repository.deserialise_inventory(
                     new_revision, xml)
                 xml = self._create_basis_xml_from_inventory(new_revision, inv)
@@ -1608 +1783 @@
         # binary.
         if self._inventory_is_modified:
             raise errors.InventoryModified(self)
-        result = xml5.serializer_v5.read_inventory(
-            self._control_files.get('inventory'))
+        result = self._deserialize(self._control_files.get('inventory'))
         self._set_inventory(result, dirty=False)
         return result
 
@@ -1668 +1842 @@
             resolve(self, filenames, ignore_misses=True)
         return conflicts
 
+    def revision_tree(self, revision_id):
+        """See Tree.revision_tree.
+
+        WorkingTree can supply revision_trees for the basis revision only
+        because there is only one cached inventory in the bzr directory.
+        """
+        if revision_id == self.last_revision():
+            try:
+                xml = self.read_basis_inventory()
+            except errors.NoSuchFile:
+                pass
+            else:
+                try:
+                    inv = xml7.serializer_v7.read_inventory_from_string(xml)
+                    # dont use the repository revision_tree api because we want
+                    # to supply the inventory.
+                    if inv.revision_id == revision_id:
+                        return revisiontree.RevisionTree(self.branch.repository,
+                            inv, revision_id)
+                except errors.BadInventoryFormat:
+                    pass
+        # raise if there was no inventory, or if we read the wrong inventory.
+        raise errors.NoSuchRevisionInTree(self, revision_id)
+
     # XXX: This method should be deprecated in favour of taking in a proper
     # new Inventory object.
     @needs_tree_write_lock
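
The new WorkingTree.revision_tree above can only serve the basis revision, built from the cached basis inventory; anything else raises NoSuchRevisionInTree. A sketch mirroring the fallback that basis_tree itself now uses:

    from bzrlib import errors

    wt.lock_read()
    try:
        basis_id = wt.last_revision()
        try:
            tree = wt.revision_tree(basis_id)          # from the local cache
        except errors.NoSuchRevisionInTree:
            tree = wt.branch.repository.revision_tree(basis_id)
    finally:
        wt.unlock()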
@@ -1703 +1901 @@
                 DeprecationWarning,
                 stacklevel=3)
             file_id = ROOT_ID
+        else:
+            file_id = osutils.safe_file_id(file_id)
+        self._set_root_id(file_id)
+
+    def _set_root_id(self, file_id):
+        """Set the root id for this tree, in a format specific manner.
+
+        :param file_id: The file id to assign to the root. It must not be
+            present in the current inventory or an error will occur. It must
+            not be None, but rather a valid file id.
+        """
         inv = self._inventory
         orig_root_id = inv.root.file_id
         # TODO: it might be nice to exit early if there was nothing
@@ -1798 +2007 @@
         if last_rev != self.branch.last_revision():
             # merge tree state up to new branch tip.
             basis = self.basis_tree()
-            to_tree = self.branch.basis_tree()
-            if basis.inventory.root is None:
-                self.set_root_id(to_tree.inventory.root.file_id)
-            result += merge.merge_inner(
-                                  self.branch,
-                                  to_tree,
-                                  basis,
-                                  this_tree=self)
+            basis.lock_read()
+            try:
+                to_tree = self.branch.basis_tree()
+                if basis.inventory.root is None:
+                    self.set_root_id(to_tree.inventory.root.file_id)
+                    self.flush()
+                result += merge.merge_inner(
+                                      self.branch,
+                                      to_tree,
+                                      basis,
+                                      this_tree=self)
+            finally:
+                basis.unlock()
             # TODO - dedup parents list with things merged by pull ?
             # reuse the tree we've updated to to set the basis:
             parent_trees = [(self.branch.last_revision(), to_tree)]
@@ -1834 +2048 @@
             # and we have converted that last revision to a pending merge.
             # base is somewhere between the branch tip now
             # and the now pending merge
+
+            # Since we just modified the working tree and inventory, flush out
+            # the current state, before we modify it again.
+            # TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
+            #       requires it only because TreeTransform directly munges the
+            #       inventory and calls tree._write_inventory(). Ultimately we
+            #       should be able to remove this extra flush.
+            self.flush()
             from bzrlib.revision import common_ancestor
             try:
                 base_rev_id = common_ancestor(self.branch.last_revision(),
@@ -1902 +2124 @@
                              file_id=self.path2id(conflicted)))
         return conflicts
 
+    def walkdirs(self, prefix=""):
+        """Walk the directories of this tree.
+
+        This API returns a generator, which is only valid during the current
+        tree transaction - within a single lock_read or lock_write duration.
+
+        If the tree is not locked, it may cause an error to be raised, depending
+        on the tree implementation.
+        """
+        disk_top = self.abspath(prefix)
+        if disk_top.endswith('/'):
+            disk_top = disk_top[:-1]
+        top_strip_len = len(disk_top) + 1
+        inventory_iterator = self._walkdirs(prefix)
+        disk_iterator = osutils.walkdirs(disk_top, prefix)
+        try:
+            current_disk = disk_iterator.next()
+            disk_finished = False
+        except OSError, e:
+            if e.errno != errno.ENOENT:
+                raise
+            current_disk = None
+            disk_finished = True
+        try:
+            current_inv = inventory_iterator.next()
+            inv_finished = False
+        except StopIteration:
+            current_inv = None
+            inv_finished = True
+        while not inv_finished or not disk_finished:
+            if not disk_finished:
+                # strip out .bzr dirs
+                if current_disk[0][1][top_strip_len:] == '':
+                    # osutils.walkdirs can be made nicer -
+                    # yield the path-from-prefix rather than the pathjoined
+                    # value.
+                    bzrdir_loc = bisect_left(current_disk[1], ('.bzr', '.bzr'))
+                    if current_disk[1][bzrdir_loc][0] == '.bzr':
+                        # we dont yield the contents of, or, .bzr itself.
+                        del current_disk[1][bzrdir_loc]
+            if inv_finished:
+                # everything is unknown
+                direction = 1
+            elif disk_finished:
+                # everything is missing
+                direction = -1
+            else:
+                direction = cmp(current_inv[0][0], current_disk[0][0])
+            if direction > 0:
+                # disk is before inventory - unknown
+                dirblock = [(relpath, basename, kind, stat, None, None) for
+                    relpath, basename, kind, stat, top_path in current_disk[1]]
+                yield (current_disk[0][0], None), dirblock
+                try:
+                    current_disk = disk_iterator.next()
+                except StopIteration:
+                    disk_finished = True
+            elif direction < 0:
+                # inventory is before disk - missing.
+                dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
+                    for relpath, basename, dkind, stat, fileid, kind in
+                    current_inv[1]]
+                yield (current_inv[0][0], current_inv[0][1]), dirblock
+                try:
+                    current_inv = inventory_iterator.next()
+                except StopIteration:
+                    inv_finished = True
+            else:
+                # versioned present directory
+                # merge the inventory and disk data together
+                dirblock = []
+                for relpath, subiterator in itertools.groupby(sorted(
+                    current_inv[1] + current_disk[1], key=operator.itemgetter(0)), operator.itemgetter(1)):
+                    path_elements = list(subiterator)
+                    if len(path_elements) == 2:
+                        inv_row, disk_row = path_elements
+                        # versioned, present file
+                        dirblock.append((inv_row[0],
+                            inv_row[1], disk_row[2],
+                            disk_row[3], inv_row[4],
+                            inv_row[5]))
+                    elif len(path_elements[0]) == 5:
+                        # unknown disk file
+                        dirblock.append((path_elements[0][0],
+                            path_elements[0][1], path_elements[0][2],
+                            path_elements[0][3], None, None))
+                    elif len(path_elements[0]) == 6:
+                        # versioned, absent file.
+                        dirblock.append((path_elements[0][0],
+                            path_elements[0][1], 'unknown', None,
+                            path_elements[0][4], path_elements[0][5]))
+                    else:
+                        raise NotImplementedError('unreachable code')
+                yield current_inv[0], dirblock
+                try:
+                    current_inv = inventory_iterator.next()
+                except StopIteration:
+                    inv_finished = True
+                try:
+                    current_disk = disk_iterator.next()
+                except StopIteration:
+                    disk_finished = True
+
+    def _walkdirs(self, prefix=""):
+        _directory = 'directory'
+        # get the root in the inventory
+        inv = self.inventory
+        top_id = inv.path2id(prefix)
+        if top_id is None:
+            pending = []
+        else:
+            pending = [(prefix, '', _directory, None, top_id, None)]
+        while pending:
+            dirblock = []
+            currentdir = pending.pop()
+            # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
+            top_id = currentdir[4]
+            if currentdir[0]:
+                relroot = currentdir[0] + '/'
+            else:
+                relroot = ""
+            # FIXME: stash the node in pending
+            entry = inv[top_id]
+            for name, child in entry.sorted_children():
+                dirblock.append((relroot + name, name, child.kind, None,
+                    child.file_id, child.kind
+                    ))
+            yield (currentdir[0], entry.file_id), dirblock
+            # push the user specified dirs from dirblock
+            for dir in reversed(dirblock):
+                if dir[2] == _directory:
+                    pending.append(dir)
+
+    @needs_tree_write_lock
+    def auto_resolve(self):
+        """Automatically resolve text conflicts according to contents.
+
+        Only text conflicts are auto_resolvable. Files with no conflict markers
+        are considered 'resolved', because bzr always puts conflict markers
+        into files that have text conflicts.  The corresponding .THIS .BASE and
+        .OTHER files are deleted, as per 'resolve'.
+        :return: a tuple of ConflictLists: (un_resolved, resolved).
+        """
+        un_resolved = _mod_conflicts.ConflictList()
+        resolved = _mod_conflicts.ConflictList()
+        conflict_re = re.compile('^(<{7}|={7}|>{7})')
+        for conflict in self.conflicts():
+            if (conflict.typestring != 'text conflict' or
+                self.kind(conflict.file_id) != 'file'):
+                un_resolved.append(conflict)
+                continue
+            my_file = open(self.id2abspath(conflict.file_id), 'rb')
+            try:
+                for line in my_file:
+                    if conflict_re.search(line):
+                        un_resolved.append(conflict)
+                        break
+                else:
+                    resolved.append(conflict)
+            finally:
+                my_file.close()
+        resolved.remove_files(self)
+        self.set_conflicts(un_resolved)
+        return un_resolved, resolved
+
 
 class WorkingTree2(WorkingTree):
     """This is the Format 2 working tree.
@@ -1953 +2340 @@
     def _last_revision(self):
         """See Mutable.last_revision."""
         try:
-            return self._control_files.get_utf8('last-revision').read()
+            return osutils.safe_revision_id(
+                        self._control_files.get('last-revision').read())
         except errors.NoSuchFile:
             return None
 
@@ -1966 +2354 @@
                 pass
             return False
         else:
-            self._control_files.put_utf8('last-revision', revision_id)
+            self._control_files.put_bytes('last-revision', revision_id)
             return True
 
     @needs_tree_write_lock
@@ -2012 +2400 @@
         if path.endswith(suffix):
             return path[:-len(suffix)]
 
+
 @deprecated_function(zero_eight)
 def is_control_file(filename):
     """See WorkingTree.is_control_filename(filename)."""
@@ -2052 +2441 @@
     _formats = {}
     """The known formats."""
 
+    requires_rich_root = False
+
     @classmethod
     def find_format(klass, a_bzrdir):
         """Return the format for the working tree object in a_bzrdir."""
@@ -2064 +2455 @@
         except KeyError:
             raise errors.UnknownFormatError(format=format_string)
 
+    def __eq__(self, other):
+        return self.__class__ is other.__class__
+
+    def __ne__(self, other):
+        return not (self == other)
+
     @classmethod
     def get_default_format(klass):
         """Return the current default format."""
@@ -2124 +2521 @@
         sio.seek(0)
         control_files.put('inventory', sio)
 
-        control_files.put_utf8('pending-merges', '')
+        control_files.put_bytes('pending-merges', '')
 
 
     def initialize(self, a_bzrdir, revision_id=None):
@@ -2133 +2530 @@
             raise errors.NotLocalUrl(a_bzrdir.transport.base)
         branch = a_bzrdir.open_branch()
         if revision_id is not None:
+            revision_id = osutils.safe_revision_id(revision_id)
            branch.lock_write()
             try:
                 revision_history = branch.revision_history()
@@ -2203 +2601 @@
     _lock_file_name = 'lock'
     _lock_class = LockDir
 
+    _tree_class = WorkingTree3
+
+    def __get_matchingbzrdir(self):
+        return bzrdir.BzrDirMetaFormat1()
+
+    _matchingbzrdir = property(__get_matchingbzrdir)
+
     def _open_control_files(self, a_bzrdir):
         transport = a_bzrdir.get_workingtree_transport(None)
         return LockableFiles(transport, self._lock_file_name,
@@ -2224 +2629 @@
         branch = a_bzrdir.open_branch()
         if revision_id is None:
             revision_id = branch.last_revision()
+        else:
+            revision_id = osutils.safe_revision_id(revision_id)
         # WorkingTree3 can handle an inventory which has a unique root id.
         # as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
         # those trees. And because there isn't a format bump inbetween, we
         # are maintaining compatibility with older clients.
         # inv = Inventory(root_id=gen_root_id())
-        inv = Inventory()
-        wt = WorkingTree3(a_bzrdir.root_transport.local_abspath('.'),
+        inv = self._initial_inventory()
+        wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
                          branch,
                          inv,
                          _internal=True,
@@ -2255 +2662 @@
             wt.unlock()
         return wt
 
+    def _initial_inventory(self):
+        return Inventory()
+
     def __init__(self):
         super(WorkingTreeFormat3, self).__init__()
-        self._matchingbzrdir = bzrdir.BzrDirMetaFormat1()
 
     def open(self, a_bzrdir, _found=False):
         """Return the WorkingTree object for a_bzrdir
@@ -2278 +2687 @@
         :param a_bzrdir: the dir for the tree.
         :param control_files: the control files for the tree.
         """
-        return WorkingTree3(a_bzrdir.root_transport.local_abspath('.'),
-                           _internal=True,
-                           _format=self,
-                           _bzrdir=a_bzrdir,
-                           _control_files=control_files)
+        return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
+                                _internal=True,
+                                _format=self,
+                                _bzrdir=a_bzrdir,
+                                _control_files=control_files)
 
     def __str__(self):
         return self.get_format_string()
 
 
+__default_format = WorkingTreeFormat4()
+WorkingTreeFormat.register_format(__default_format)
+WorkingTreeFormat.register_format(WorkingTreeFormat3())
+WorkingTreeFormat.set_default_format(__default_format)
 # formats which have no format string are not discoverable
 # and not independently creatable, so are not registered.
-__default_format = WorkingTreeFormat3()
-WorkingTreeFormat.register_format(__default_format)
-WorkingTreeFormat.set_default_format(__default_format)
 _legacy_formats = [WorkingTreeFormat2(),
                    ]
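
With this merge, WorkingTreeFormat4 (the dirstate working tree) is registered and becomes the default format, WorkingTreeFormat3 stays registered, and format 2 survives only as a legacy, non-discoverable format. A quick check (a sketch; output depends on the installed bzrlib):

    from bzrlib import workingtree

    default = workingtree.WorkingTreeFormat.get_default_format()
    print default.get_format_string()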