    def get_file_size(self, file_id):
        return os.path.getsize(self.id2abspath(file_id))

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat

    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add.

        Note that the command line normally calls smart_add instead,
        which can automatically recurse.

        This adds the files to the inventory, so that they will be
        recorded by the next commit.

        :param files: List of paths to add, relative to the base of the tree.
        :param ids: If set, use these instead of automatically generated ids.
            Must be the same length as the list of files, but may
            contain None for ids that are to be autogenerated.

        TODO: Perhaps have an option to add the ids even if the files do
            not exist.

        TODO: Perhaps callback with the ids and paths as they're added.
        """
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.inventory
        for f, file_id, kind in zip(files, ids, kinds):
            assert kind is not None
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                inv.add_path(f, kind=kind, file_id=file_id)
            self._inventory_is_modified = True
    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    @needs_write_lock
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it is not the first parent to be
            added, or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add a (revision_id, tree) tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will
        be simpler to use that api. If you have the parent already available,
        using this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent
            list. If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            # was.
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
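    # Illustrative usage sketch (not part of the original module): how the
    # parent-management helpers above are typically combined. The tree path
    # and revision id are assumptions made for the example only.
    #
    #   wt = WorkingTree.open('.')
    #   wt.lock_write()
    #   try:
    #       # record an extra merge parent; the leftmost parent must already
    #       # exist in the repository unless allow_leftmost_as_ghost is used.
    #       wt.add_parent_tree_id('some-revision-id')
    #   finally:
    #       wt.unlock()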
    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
        self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file on disk
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            size = stat_result.st_size
            # try for a stat cache lookup
            executable = self._is_executable_from_path_and_stat(path, stat_result)
            return (kind, size, executable, self._sha_from_stat(
                path, stat_result))
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # tree reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            return ('symlink', None, None, os.readlink(abspath))
        else:
            return (kind, None, None, None)
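    # Illustrative note (not from the original source): the summary above is a
    # 4-tuple whose shape depends on the kind, roughly:
    #
    #   ('file', <size>, <executable?>, <sha1 or None>)
    #   ('directory', None, None, None)        # or 'tree-reference'
    #   ('symlink', None, None, <link target>)
    #   ('missing', None, None, None)
    #
    # The sha1 slot is only filled in when _sha_from_stat (defined below) can
    # answer from a stat cache; its default implementation returns None.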
    @deprecated_method(zero_eleven)
    def pending_merges(self):
        """Return a list of pending merges.

        These are revisions that have been merged into the working
        directory but not yet committed.

        As of 0.11 this is deprecated. Please see WorkingTree.get_parent_ids()
        instead - which is available on all tree objects.
        """
        return self.get_parent_ids()[1:]
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left-hand parent exists if there are any
        revisions, unless allow_leftmost_as_ghost is True.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._control_files.put_bytes('pending-merges', '\n'.join(merges))
    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you
        have tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                    leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)

    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
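    # Illustrative sketch (not part of the original file): set_merge_modified
    # writes one rio Stanza per file under MERGE_MODIFIED_HEADER_1, so the
    # 'merge-hashes' control file ends up looking roughly like
    #
    #   <header line from MERGE_MODIFIED_HEADER_1>
    #   file_id: some-file-id
    #   hash: <sha1 of the merged text>
    #
    # The stanza rendering shown here is only an assumed sketch of rio output;
    # merge_modified() below is the matching reader.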
    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._control_files.put(filename, my_file)
    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
                          merge_type=None):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults
            to branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        pb = bzrlib.ui.ui_factory.nested_progress_bar()
        try:
            merger = Merger(self.branch, this_tree=self, pb=pb)
            merger.pp = ProgressPhase("Merge phase", 5, pb)
            merger.pp.next_phase()
            # check that there are no local alterations
            merger.check_basis(check_clean=True, require_commits=False)
            if to_revision is None:
                to_revision = _mod_revision.ensure_null(branch.last_revision())
            merger.other_rev_id = to_revision
            if _mod_revision.is_null(merger.other_rev_id):
                raise errors.NoCommits(branch)
            self.branch.fetch(branch, last_revision=merger.other_rev_id)
            merger.other_basis = merger.other_rev_id
            merger.other_tree = self.branch.repository.revision_tree(
                merger.other_rev_id)
            merger.other_branch = branch
            merger.pp.next_phase()
            if from_revision is None:
                merger.find_base()
            else:
                merger.set_base_revision(from_revision, branch)
            if merger.base_rev_id == merger.other_rev_id:
                raise errors.PointlessMerge
            merger.backup_files = False
            if merge_type is None:
                merger.merge_type = Merge3Merger
            else:
                merger.merge_type = merge_type
            merger.set_interesting_files(None)
            merger.show_base = False
            merger.reprocess = False
            conflicts = merger.do_merge()
        finally:
            pb.finished()
        return conflicts
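    # Illustrative usage sketch (not in the original source). Assuming 'wt' is
    # an open working tree and '../other' is another branch of the same
    # project:
    #
    #   from bzrlib.branch import Branch
    #   other = Branch.open('../other')
    #   conflicts = wt.merge_from_branch(other)
    #   if conflicts:
    #       print 'merge produced %s conflicts' % conflicts
    #
    # The return value is whatever merger.do_merge() reports, typically a
    # conflict count that callers check before committing.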
    @needs_read_lock
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._control_files.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        merge_hashes = {}
        try:
            if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                raise errors.MergeModifiedFormatError()
        except StopIteration:
            raise errors.MergeModifiedFormatError()
        for s in RioReader(hashfile):
            # RioReader reads in Unicode, so convert file_ids back to utf8
            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
            if file_id not in self.inventory:
                continue
            text_hash = s.get("hash")
            if text_hash == self.get_file_sha1(file_id):
                merge_hashes[file_id] = text_hash
        return merge_hashes
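    # Illustrative sketch (not part of the original module): merge_modified()
    # is the read side of set_merge_modified() above. A round trip looks
    # roughly like this, assuming 'wt' is locked and a_file_id is versioned:
    #
    #   sha1 = wt.get_file_sha1(a_file_id)
    #   wt.set_merge_modified({a_file_id: sha1})
    #   wt.merge_modified()      # -> {a_file_id: sha1}
    #
    # Entries silently drop out of the result once the working file's text no
    # longer matches the recorded hash.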
    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id
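    # Illustrative usage sketch (not in the original source); the path is
    # relative to the tree root and the generated file id is returned:
    #
    #   new_id = wt.mkdir('doc')            # creates ./doc and versions it
    #   wt.commit('add doc directory')      # assuming 'wt' holds a write lock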
    def get_symlink_target(self, file_id):
        return os.readlink(self.id2abspath(file_id))
650
def file_class(self, filename):
651
if self.path2id(filename):
653
elif self.is_ignored(filename):
658
def list_files(self):
659
"""Recursively list all files as (path, class, kind, id).
970
def subsume(self, other_tree):
971
def add_children(inventory, entry):
972
for child_entry in entry.children.values():
973
inventory._byid[child_entry.file_id] = child_entry
974
if child_entry.kind == 'directory':
975
add_children(inventory, child_entry)
976
if other_tree.get_root_id() == self.get_root_id():
977
raise errors.BadSubsumeSource(self, other_tree,
978
'Trees have the same root')
980
other_tree_path = self.relpath(other_tree.basedir)
981
except errors.PathNotChild:
982
raise errors.BadSubsumeSource(self, other_tree,
983
'Tree is not contained by the other')
984
new_root_parent = self.path2id(osutils.dirname(other_tree_path))
985
if new_root_parent is None:
986
raise errors.BadSubsumeSource(self, other_tree,
987
'Parent directory is not versioned.')
988
# We need to ensure that the result of a fetch will have a
989
# versionedfile for the other_tree root, and only fetching into
990
# RepositoryKnit2 guarantees that.
991
if not self.branch.repository.supports_rich_root():
992
raise errors.SubsumeTargetNeedsUpgrade(other_tree)
993
other_tree.lock_tree_write()
995
new_parents = other_tree.get_parent_ids()
996
other_root = other_tree.inventory.root
997
other_root.parent_id = new_root_parent
998
other_root.name = osutils.basename(other_tree_path)
999
self.inventory.add(other_root)
1000
add_children(self.inventory, other_root)
1001
self._write_inventory(self.inventory)
1002
# normally we don't want to fetch whole repositories, but i think
1003
# here we really do want to consolidate the whole thing.
1004
for parent_id in other_tree.get_parent_ids():
1005
self.branch.fetch(other_tree.branch, parent_id)
1006
self.add_parent_tree_id(parent_id)
1009
other_tree.bzrdir.retire_bzrdir()
1011
def _setup_directory_is_tree_reference(self):
1012
if self._branch.repository._format.supports_tree_reference:
1013
self._directory_is_tree_reference = \
1014
self._directory_may_be_tree_reference
1016
self._directory_is_tree_reference = \
1017
self._directory_is_never_tree_reference
1019
def _directory_is_never_tree_reference(self, relpath):
1022
def _directory_may_be_tree_reference(self, relpath):
1023
# as a special case, if a directory contains control files then
1024
# it's a tree reference, except that the root of the tree is not
1025
return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
1026
# TODO: We could ask all the control formats whether they
1027
# recognize this directory, but at the moment there's no cheap api
1028
# to do that. Since we probably can only nest bzr checkouts and
1029
# they always use this name it's ok for now. -- mbp 20060306
1031
# FIXME: There is an unhandled case here of a subdirectory
1032
# containing .bzr but not a branch; that will probably blow up
1033
# when you try to commit it. It might happen if there is a
1034
# checkout in a subdirectory. This can be avoided by not adding
1037
@needs_tree_write_lock
1038
def extract(self, file_id, format=None):
1039
"""Extract a subtree from this tree.
1041
A new branch will be created, relative to the path for this tree.
1045
segments = osutils.splitpath(path)
1046
transport = self.branch.bzrdir.root_transport
1047
for name in segments:
1048
transport = transport.clone(name)
1049
transport.ensure_base()
1052
sub_path = self.id2path(file_id)
1053
branch_transport = mkdirs(sub_path)
1055
format = self.bzrdir.cloning_metadir()
1056
branch_transport.ensure_base()
1057
branch_bzrdir = format.initialize_on_transport(branch_transport)
1059
repo = branch_bzrdir.find_repository()
1060
except errors.NoRepositoryPresent:
1061
repo = branch_bzrdir.create_repository()
1062
if not repo.supports_rich_root():
1063
raise errors.RootNotRich()
1064
new_branch = branch_bzrdir.create_branch()
1065
new_branch.pull(self.branch)
1066
for parent_id in self.get_parent_ids():
1067
new_branch.fetch(self.branch, parent_id)
1068
tree_transport = self.bzrdir.root_transport.clone(sub_path)
1069
if tree_transport.base != branch_transport.base:
1070
tree_bzrdir = format.initialize_on_transport(tree_transport)
1071
branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
1073
tree_bzrdir = branch_bzrdir
1074
wt = tree_bzrdir.create_workingtree(NULL_REVISION)
1075
wt.set_parent_ids(self.get_parent_ids())
1076
my_inv = self.inventory
1077
child_inv = Inventory(root_id=None)
1078
new_root = my_inv[file_id]
1079
my_inv.remove_recursive_id(file_id)
1080
new_root.parent_id = None
1081
child_inv.add(new_root)
1082
self._write_inventory(my_inv)
1083
wt._write_inventory(child_inv)
1086
def _serialize(self, inventory, out_file):
1087
xml5.serializer_v5.write_inventory(self._inventory, out_file,
1090
def _deserialize(self, in_file):
1091
return xml5.serializer_v5.read_inventory(in_file)
1094
"""Write the in memory inventory to disk."""
1095
# TODO: Maybe this should only write on dirty ?
1096
if self._control_files._lock_mode != 'w':
1097
raise errors.NotWriteLocked(self)
1099
self._serialize(self._inventory, sio)
1101
self._control_files.put('inventory', sio)
1102
self._inventory_is_modified = False
1104
def _kind(self, relpath):
1105
return osutils.file_kind(self.abspath(relpath))
1107
def list_files(self, include_root=False):
1108
"""Recursively list all files as (path, class, kind, id, entry).
661
1110
Lists, but does not descend into unversioned directories.
666
1115
Skips the control directory.
668
inv = self._inventory
670
def descend(from_dir_relpath, from_dir_id, dp):
1117
# list_files is an iterator, so @needs_read_lock doesn't work properly
1118
# with it. So callers should be careful to always read_lock the tree.
1119
if not self.is_locked():
1120
raise errors.ObjectNotLocked(self)
1122
inv = self.inventory
1123
if include_root is True:
1124
yield ('', 'V', 'directory', inv.root.file_id, inv.root)
1125
# Convert these into local objects to save lookup times
1126
pathjoin = osutils.pathjoin
1127
file_kind = self._kind
1129
# transport.base ends in a slash, we want the piece
1130
# between the last two slashes
1131
transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]
1133
fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
1135
# directory file_id, relative path, absolute path, reverse sorted children
1136
children = os.listdir(self.basedir)
1138
# jam 20060527 The kernel sized tree seems equivalent whether we
1139
# use a deque and popleft to keep them sorted, or if we use a plain
1140
# list and just reverse() them.
1141
children = collections.deque(children)
1142
stack = [(inv.root.file_id, u'', self.basedir, children)]
1144
from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
1147
f = children.popleft()
674
1148
## TODO: If we find a subdirectory with its own .bzr
675
1149
## directory, then that is a separate tree and we
676
1150
## should exclude it.
678
1152
# the bzrdir for this tree
679
if self.bzrdir.transport.base.endswith(f + '/'):
1153
if transport_base_dir == f:
683
fp = appendpath(from_dir_relpath, f)
1156
# we know that from_dir_relpath and from_dir_abspath never end in a slash
1157
# and 'f' doesn't begin with one, we can do a string op, rather
1158
# than the checks of pathjoin(), all relative paths will have an extra slash
1160
fp = from_dir_relpath + '/' + f
686
fap = appendpath(dp, f)
1163
fap = from_dir_abspath + '/' + f
688
1165
f_ie = inv.get_child(from_dir_id, f)
691
elif self.is_ignored(fp):
1168
elif self.is_ignored(fp[1:]):
1171
# we may not have found this file, because of a unicode issue
1172
f_norm, can_access = osutils.normalized_filename(f)
1173
if f == f_norm or not can_access:
1174
# No change, so treat this file normally
1177
# this file can be accessed by a normalized path
1178
# check again if it is versioned
1179
# these lines are repeated here for performance
1181
fp = from_dir_relpath + '/' + f
1182
fap = from_dir_abspath + '/' + f
1183
f_ie = inv.get_child(from_dir_id, f)
1186
elif self.is_ignored(fp[1:]):
696
1191
fk = file_kind(fap)
700
raise BzrCheckError("file %r entered as kind %r id %r, "
702
% (fap, f_ie.kind, f_ie.file_id, fk))
704
1193
# make a last minute entry
1195
yield fp[1:], c, fk, f_ie.file_id, f_ie
708
if fk == 'directory':
709
entry = TreeDirectory()
712
elif fk == 'symlink':
1198
yield fp[1:], c, fk, None, fk_entries[fk]()
1200
yield fp[1:], c, fk, None, TreeEntry()
717
yield fp, c, fk, (f_ie and f_ie.file_id), entry
719
1203
if fk != 'directory':
723
# don't descend unversioned directories
726
for ff in descend(fp, f_ie.file_id, fap):
729
for f in descend(u'', inv.root.file_id, self.basedir):
733
def move(self, from_paths, to_name):
1206
# But do this child first
1207
new_children = os.listdir(fap)
1209
new_children = collections.deque(new_children)
1210
stack.append((f_ie.file_id, fp, fap, new_children))
1211
# Break out of inner loop,
1212
# so that we start outer loop with child
1215
# if we finished all children, pop it off the stack
1218
@needs_tree_write_lock
1219
def move(self, from_paths, to_dir=None, after=False, **kwargs):
734
1220
"""Rename files.
736
to_name must exist in the inventory.
1222
to_dir must exist in the inventory.
738
If to_name exists and is a directory, the files are moved into
1224
If to_dir exists and is a directory, the files are moved into
739
1225
it, keeping their old names.
741
Note that to_name is only the last component of the new name;
1227
Note that to_dir is only the last component of the new name;
742
1228
this doesn't change the directory.
1230
For each entry in from_paths the move mode will be determined
1233
The first mode moves the file in the filesystem and updates the
1234
inventory. The second mode only updates the inventory without
1235
touching the file on the filesystem. This is the new mode introduced
1238
move uses the second mode if 'after == True' and the target is not
1239
versioned but present in the working tree.
1241
move uses the second mode if 'after == False' and the source is
1242
versioned but no longer in the working tree, and the target is not
1243
versioned but present in the working tree.
1245
move uses the first mode if 'after == False' and the source is
1246
versioned and present in the working tree, and the target is not
1247
versioned and not present in the working tree.
1249
Everything else results in an error.
744
1251
This returns a list of (from_path, to_path) pairs for each
745
1252
entry that is moved.
748
## TODO: Option to move IDs only
1257
# check for deprecated use of signature
1259
to_dir = kwargs.get('to_name', None)
1261
raise TypeError('You must supply a target directory')
1263
symbol_versioning.warn('The parameter to_name was deprecated'
1264
' in version 0.13. Use to_dir instead',
1267
# check destination directory
749
1268
assert not isinstance(from_paths, basestring)
750
1269
inv = self.inventory
751
to_abs = self.abspath(to_name)
1270
to_abs = self.abspath(to_dir)
752
1271
if not isdir(to_abs):
753
raise BzrError("destination %r is not a directory" % to_abs)
754
if not self.has_filename(to_name):
755
raise BzrError("destination %r not in working directory" % to_abs)
756
to_dir_id = inv.path2id(to_name)
757
if to_dir_id == None and to_name != '':
758
raise BzrError("destination %r is not a versioned directory" % to_name)
1272
raise errors.BzrMoveFailedError('',to_dir,
1273
errors.NotADirectory(to_abs))
1274
if not self.has_filename(to_dir):
1275
raise errors.BzrMoveFailedError('',to_dir,
1276
errors.NotInWorkingDirectory(to_dir))
1277
to_dir_id = inv.path2id(to_dir)
1278
if to_dir_id is None:
1279
raise errors.BzrMoveFailedError('',to_dir,
1280
errors.NotVersionedError(path=str(to_dir)))
759
1282
to_dir_ie = inv[to_dir_id]
760
if to_dir_ie.kind not in ('directory', 'root_directory'):
761
raise BzrError("destination %r is not a directory" % to_abs)
763
to_idpath = inv.get_idpath(to_dir_id)
766
if not self.has_filename(f):
767
raise BzrError("%r does not exist in working tree" % f)
768
f_id = inv.path2id(f)
770
raise BzrError("%r is not versioned" % f)
771
name_tail = splitpath(f)[-1]
772
dest_path = appendpath(to_name, name_tail)
773
if self.has_filename(dest_path):
774
raise BzrError("destination %r already exists" % dest_path)
775
if f_id in to_idpath:
776
raise BzrError("can't move %r to a subdirectory of itself" % f)
778
# OK, so there's a race here, it's possible that someone will
779
# create a file in this interval and then the rename might be
780
# left half-done. But we should have caught most problems.
781
orig_inv = deepcopy(self.inventory)
1283
if to_dir_ie.kind != 'directory':
1284
raise errors.BzrMoveFailedError('',to_dir,
1285
errors.NotADirectory(to_abs))
1287
# create rename entries and tuples
1288
for from_rel in from_paths:
1289
from_tail = splitpath(from_rel)[-1]
1290
from_id = inv.path2id(from_rel)
1292
raise errors.BzrMoveFailedError(from_rel,to_dir,
1293
errors.NotVersionedError(path=str(from_rel)))
1295
from_entry = inv[from_id]
1296
from_parent_id = from_entry.parent_id
1297
to_rel = pathjoin(to_dir, from_tail)
1298
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1300
from_tail=from_tail,
1301
from_parent_id=from_parent_id,
1302
to_rel=to_rel, to_tail=from_tail,
1303
to_parent_id=to_dir_id)
1304
rename_entries.append(rename_entry)
1305
rename_tuples.append((from_rel, to_rel))
1307
# determine which move mode to use. checks also for movability
1308
rename_entries = self._determine_mv_mode(rename_entries, after)
1310
original_modified = self._inventory_is_modified
784
name_tail = splitpath(f)[-1]
785
dest_path = appendpath(to_name, name_tail)
786
result.append((f, dest_path))
787
inv.rename(inv.path2id(f), to_dir_id, name_tail)
789
rename(self.abspath(f), self.abspath(dest_path))
791
raise BzrError("failed to rename %r to %r: %s" %
792
(f, dest_path, e[1]),
793
["rename rolled back"])
1313
self._inventory_is_modified = True
1314
self._move(rename_entries)
795
1316
# restore the inventory on error
796
self._set_inventory(orig_inv)
1317
self._inventory_is_modified = original_modified
798
1319
self._write_inventory(inv)
802
def rename_one(self, from_rel, to_rel):
1320
return rename_tuples
1322
def _determine_mv_mode(self, rename_entries, after=False):
1323
"""Determines for each from-to pair if both inventory and working tree
1324
or only the inventory has to be changed.
1326
Also does basic plausibility tests.
1328
inv = self.inventory
1330
for rename_entry in rename_entries:
1331
# store to local variables for easier reference
1332
from_rel = rename_entry.from_rel
1333
from_id = rename_entry.from_id
1334
to_rel = rename_entry.to_rel
1335
to_id = inv.path2id(to_rel)
1336
only_change_inv = False
1338
# check the inventory for source and destination
1340
raise errors.BzrMoveFailedError(from_rel,to_rel,
1341
errors.NotVersionedError(path=str(from_rel)))
1342
if to_id is not None:
1343
raise errors.BzrMoveFailedError(from_rel,to_rel,
1344
errors.AlreadyVersionedError(path=str(to_rel)))
1346
# try to determine the mode for rename (only change inv or change
1347
# inv and file system)
1349
if not self.has_filename(to_rel):
1350
raise errors.BzrMoveFailedError(from_id,to_rel,
1351
errors.NoSuchFile(path=str(to_rel),
1352
extra="New file has not been created yet"))
1353
only_change_inv = True
1354
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1355
only_change_inv = True
1356
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1357
only_change_inv = False
1358
elif (sys.platform == 'win32'
1359
and from_rel.lower() == to_rel.lower()
1360
and self.has_filename(from_rel)):
1361
only_change_inv = False
1363
# something is wrong, so lets determine what exactly
1364
if not self.has_filename(from_rel) and \
1365
not self.has_filename(to_rel):
1366
raise errors.BzrRenameFailedError(from_rel,to_rel,
1367
errors.PathsDoNotExist(paths=(str(from_rel),
1370
raise errors.RenameFailedFilesExist(from_rel, to_rel)
1371
rename_entry.only_change_inv = only_change_inv
1372
return rename_entries
1374
def _move(self, rename_entries):
1375
"""Moves a list of files.
1377
Depending on the value of the flag 'only_change_inv', the
1378
file will be moved on the file system or not.
1380
inv = self.inventory
1383
for entry in rename_entries:
1385
self._move_entry(entry)
1387
self._rollback_move(moved)
1391
def _rollback_move(self, moved):
1392
"""Try to rollback a previous move in case of an filesystem error."""
1393
inv = self.inventory
1396
self._move_entry(_RenameEntry(entry.to_rel, entry.from_id,
1397
entry.to_tail, entry.to_parent_id, entry.from_rel,
1398
entry.from_tail, entry.from_parent_id,
1399
entry.only_change_inv))
1400
except errors.BzrMoveFailedError, e:
1401
raise errors.BzrMoveFailedError( '', '', "Rollback failed."
1402
" The working tree is in an inconsistent state."
1403
" Please consider doing a 'bzr revert'."
1404
" Error message is: %s" % e)
1406
def _move_entry(self, entry):
1407
inv = self.inventory
1408
from_rel_abs = self.abspath(entry.from_rel)
1409
to_rel_abs = self.abspath(entry.to_rel)
1410
if from_rel_abs == to_rel_abs:
1411
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1412
"Source and target are identical.")
1414
if not entry.only_change_inv:
1416
osutils.rename(from_rel_abs, to_rel_abs)
1418
raise errors.BzrMoveFailedError(entry.from_rel,
1420
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1422
@needs_tree_write_lock
1423
def rename_one(self, from_rel, to_rel, after=False):
803
1424
"""Rename one file.
805
1426
This can change the directory or the filename or both.
1428
rename_one has several 'modes' to work. First, it can rename a physical
1429
file and change the file_id. That is the normal mode. Second, it can
1430
only change the file_id without touching any physical file. This is
1431
the new mode introduced in version 0.15.
1433
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1434
versioned but present in the working tree.
1436
rename_one uses the second mode if 'after == False' and 'from_rel' is
1437
versioned but no longer in the working tree, and 'to_rel' is not
1438
versioned but present in the working tree.
1440
rename_one uses the first mode if 'after == False' and 'from_rel' is
1441
versioned and present in the working tree, and 'to_rel' is not
1442
versioned and not present in the working tree.
1444
Everything else results in an error.
807
1446
inv = self.inventory
808
if not self.has_filename(from_rel):
809
raise BzrError("can't rename: old working file %r does not exist" % from_rel)
810
if self.has_filename(to_rel):
811
raise BzrError("can't rename: new working file %r already exists" % to_rel)
813
file_id = inv.path2id(from_rel)
815
raise BzrError("can't rename: old name %r is not versioned" % from_rel)
818
from_parent = entry.parent_id
819
from_name = entry.name
821
if inv.path2id(to_rel):
822
raise BzrError("can't rename: new name %r is already versioned" % to_rel)
1449
# create rename entries and tuples
1450
from_tail = splitpath(from_rel)[-1]
1451
from_id = inv.path2id(from_rel)
1453
raise errors.BzrRenameFailedError(from_rel,to_rel,
1454
errors.NotVersionedError(path=str(from_rel)))
1455
from_entry = inv[from_id]
1456
from_parent_id = from_entry.parent_id
824
1457
to_dir, to_tail = os.path.split(to_rel)
825
1458
to_dir_id = inv.path2id(to_dir)
826
if to_dir_id == None and to_dir != '':
827
raise BzrError("can't determine destination directory id for %r" % to_dir)
829
mutter("rename_one:")
830
mutter(" file_id {%s}" % file_id)
831
mutter(" from_rel %r" % from_rel)
832
mutter(" to_rel %r" % to_rel)
833
mutter(" to_dir %r" % to_dir)
834
mutter(" to_dir_id {%s}" % to_dir_id)
836
inv.rename(file_id, to_dir_id, to_tail)
838
from_abs = self.abspath(from_rel)
839
to_abs = self.abspath(to_rel)
841
rename(from_abs, to_abs)
843
inv.rename(file_id, from_parent, from_name)
844
raise BzrError("failed to rename %r to %r: %s"
845
% (from_abs, to_abs, e[1]),
846
["rename rolled back"])
1459
rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
1461
from_tail=from_tail,
1462
from_parent_id=from_parent_id,
1463
to_rel=to_rel, to_tail=to_tail,
1464
to_parent_id=to_dir_id)
1465
rename_entries.append(rename_entry)
1467
# determine which move mode to use. checks also for movability
1468
rename_entries = self._determine_mv_mode(rename_entries, after)
1470
# check if the target changed directory and if the target directory is
1472
if to_dir_id is None:
1473
raise errors.BzrMoveFailedError(from_rel,to_rel,
1474
errors.NotVersionedError(path=str(to_dir)))
1476
# all checks done. now we can continue with our actual work
1477
mutter('rename_one:\n'
1482
' to_dir_id {%s}\n',
1483
from_id, from_rel, to_rel, to_dir, to_dir_id)
1485
self._move(rename_entries)
847
1486
self._write_inventory(inv)
1488
class _RenameEntry(object):
1489
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1490
to_rel, to_tail, to_parent_id, only_change_inv=False):
1491
self.from_rel = from_rel
1492
self.from_id = from_id
1493
self.from_tail = from_tail
1494
self.from_parent_id = from_parent_id
1495
self.to_rel = to_rel
1496
self.to_tail = to_tail
1497
self.to_parent_id = to_parent_id
1498
self.only_change_inv = only_change_inv
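# Illustrative sketch (not part of the original module): the two rename modes
# described in the move()/rename_one() docstrings above, with paths relative
# to the tree root.
#
#   wt.rename_one('a.txt', 'subdir/a.txt')            # moves file + inventory
#
#   # the file was already moved on disk (e.g. with plain 'mv'); only the
#   # inventory still needs updating:
#   wt.rename_one('b.txt', 'subdir/b.txt', after=True)
#
# move() behaves the same way but takes a list of sources and a single
# destination directory.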
849
1500
@needs_read_lock
850
1501
def unknowns(self):
851
1502
"""Return all unknown files.
853
1504
These are files in the working directory that are not versioned or
854
1505
control files or ignored.
856
>>> from bzrlib.bzrdir import ScratchDir
857
>>> d = ScratchDir(files=['foo', 'foo~'])
858
>>> b = d.open_branch()
859
>>> tree = d.open_workingtree()
860
>>> map(str, tree.unknowns())
863
>>> list(b.unknowns())
865
>>> tree.remove('foo')
866
>>> list(b.unknowns())
869
for subp in self.extras():
870
if not self.is_ignored(subp):
1507
# force the extras method to be fully executed before returning, to
1508
# prevent race conditions with the lock
1510
[subp for subp in self.extras() if not self.is_ignored(subp)])
1512
@needs_tree_write_lock
1513
def unversion(self, file_ids):
1514
"""Remove the file ids in file_ids from the current versioned set.
1516
When a file_id is unversioned, all of its children are automatically
1519
:param file_ids: The file ids to stop versioning.
1520
:raises: NoSuchId if any fileid is not currently versioned.
1522
for file_id in file_ids:
1523
if self._inventory.has_id(file_id):
1524
self._inventory.remove_recursive_id(file_id)
1526
raise errors.NoSuchId(self, file_id)
1528
# in the future this should just set a dirty bit to wait for the
1529
# final unlock. However, until all methods of workingtree start
1530
# with the current in-memory inventory rather than triggering
1531
# a read, it is more complex - we need to teach read_inventory
1532
# to know when to read, and when to not read first... and possibly
1533
# to save first when the in memory one may be corrupted.
1534
# so for now, we just only write it if it is indeed dirty.
1536
self._write_inventory(self._inventory)
873
1538
@deprecated_method(zero_eight)
874
1539
def iter_conflicts(self):
875
1540
"""List all files in the tree that have text or content conflicts.
1056
1800
This is used to allow WorkingTree3 instances to not affect branch
1057
1801
when their last revision is set.
1059
if new_revision is None:
1803
if _mod_revision.is_null(new_revision):
1060
1804
self.branch.set_revision_history([])
1062
# current format is locked in with the branch
1063
revision_history = self.branch.revision_history()
1065
position = revision_history.index(new_revision)
1067
raise errors.NoSuchRevision(self.branch, new_revision)
1068
self.branch.set_revision_history(revision_history[:position + 1])
1807
self.branch.generate_revision_history(new_revision)
1808
except errors.NoSuchRevision:
1809
# not present in the repo - dont try to set it deeper than the tip
1810
self.branch.set_revision_history([new_revision])
1813
def _write_basis_inventory(self, xml):
1814
"""Write the basis inventory XML to the basis-inventory file"""
1815
assert isinstance(xml, str), 'serialised xml must be bytestring.'
1816
path = self._basis_inventory_name()
1818
self._control_files.put(path, sio)
1820
def _create_basis_xml_from_inventory(self, revision_id, inventory):
1821
"""Create the text that will be saved in basis-inventory"""
1822
inventory.revision_id = revision_id
1823
return xml7.serializer_v7.write_inventory_to_string(inventory)
1071
1825
def _cache_basis_inventory(self, new_revision):
1072
1826
"""Cache new_revision as the basis inventory."""
1827
# TODO: this should allow the ready-to-use inventory to be passed in,
1828
# as commit already has that ready-to-use [while the format is the
1074
1831
# this double handles the inventory - unpack and repack -
1075
1832
# but is easier to understand. We can/should put a conditional
1076
1833
# in here based on whether the inventory is in the latest format
1077
1834
# - perhaps we should repack all inventories on a repository
1079
inv = self.branch.repository.get_inventory(new_revision)
1080
inv.revision_id = new_revision
1081
xml = bzrlib.xml5.serializer_v5.write_inventory_to_string(inv)
1083
path = self._basis_inventory_name()
1084
self._control_files.put_utf8(path, xml)
1085
except WeaveRevisionNotPresent:
1836
# the fast path is to copy the raw xml from the repository. If the
1837
# xml contains 'revision_id="', then we assume the right
1838
# revision_id is set. We must check for this full string, because a
1839
# root node id can legitimately look like 'revision_id' but cannot
1841
xml = self.branch.repository.get_inventory_xml(new_revision)
1842
firstline = xml.split('\n', 1)[0]
1843
if (not 'revision_id="' in firstline or
1844
'format="7"' not in firstline):
1845
inv = self.branch.repository.deserialise_inventory(
1847
xml = self._create_basis_xml_from_inventory(new_revision, inv)
1848
self._write_basis_inventory(xml)
1849
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1088
1852
def read_basis_inventory(self):
1089
1853
"""Read the cached basis inventory."""
1090
1854
path = self._basis_inventory_name()
1091
return self._control_files.get_utf8(path).read()
1855
return self._control_files.get(path).read()
1093
1857
@needs_read_lock
1094
1858
def read_working_inventory(self):
1095
"""Read the working inventory."""
1859
"""Read the working inventory.
1861
:raises errors.InventoryModified: read_working_inventory will fail
1862
when the current in memory inventory has been modified.
1864
# conceptually this should be an implementation detail of the tree.
1865
# XXX: Deprecate this.
1096
1866
# ElementTree does its own conversion from UTF-8, so open in
1098
result = bzrlib.xml5.serializer_v5.read_inventory(
1099
self._control_files.get('inventory'))
1100
self._set_inventory(result)
1868
if self._inventory_is_modified:
1869
raise errors.InventoryModified(self)
1870
result = self._deserialize(self._control_files.get('inventory'))
1871
self._set_inventory(result, dirty=False)
1104
def remove(self, files, verbose=False):
1105
"""Remove nominated files from the working inventory..
1107
This does not remove their text. This does not run on XXX on what? RBC
1109
TODO: Refuse to remove modified files unless --force is given?
1111
TODO: Do something useful with directories.
1113
TODO: Should this remove the text or not? Tough call; not
1114
removing may be useful and the user can just use rm, and
1115
is the opposite of add. Removing it is consistent with most
1116
other tools. Maybe an option.
1874
@needs_tree_write_lock
1875
def remove(self, files, verbose=False, to_file=None, keep_files=True,
1877
"""Remove nominated files from the working inventory.
1879
:files: File paths relative to the basedir.
1880
:keep_files: If true, the files will also be kept.
1881
:force: Delete files and directories, even if they are changed and
1882
even if the directories are not empty.
1118
## TODO: Normalize names
1119
## TODO: Remove nested loops; better scalability
1120
1884
if isinstance(files, basestring):
1121
1885
files = [files]
1123
inv = self.inventory
1125
# do this before any modifications
1890
unknown_nested_files=set()
1892
def recurse_directory_to_add_files(directory):
1893
# Recurse directory and add all files
1894
# so we can check if they have changed.
1895
for parent_info, file_infos in\
1896
osutils.walkdirs(self.abspath(directory),
1898
for relpath, basename, kind, lstat, abspath in file_infos:
1899
# Is it versioned or ignored?
1900
if self.path2id(relpath) or self.is_ignored(relpath):
1901
# Add nested content for deletion.
1902
new_files.add(relpath)
1904
# Files which are not versioned and not ignored
1905
# should be treated as unknown.
1906
unknown_nested_files.add((relpath, None, kind))
1908
for filename in files:
1909
# Get file name into canonical form.
1910
abspath = self.abspath(filename)
1911
filename = self.relpath(abspath)
1912
if len(filename) > 0:
1913
new_files.add(filename)
1914
if osutils.isdir(abspath):
1915
recurse_directory_to_add_files(filename)
1917
files = list(new_files)
1920
return # nothing to do
1922
# Sort needed to first handle directory content before the directory
1923
files.sort(reverse=True)
1925
# Bail out if we are going to delete files we shouldn't
1926
if not keep_files and not force:
1927
has_changed_files = len(unknown_nested_files) > 0
1928
if not has_changed_files:
1929
for (file_id, path, content_change, versioned, parent_id, name,
1930
kind, executable) in self._iter_changes(self.basis_tree(),
1931
include_unchanged=True, require_versioned=False,
1932
want_unversioned=True, specific_files=files):
1933
if versioned == (False, False):
1934
# The record is unknown ...
1935
if not self.is_ignored(path[1]):
1936
# ... but not ignored
1937
has_changed_files = True
1939
elif content_change and (kind[1] != None):
1940
# Versioned and changed, but not deleted
1941
has_changed_files = True
1944
if has_changed_files:
1945
# Make delta show ALL applicable changes in error message.
1946
tree_delta = self.changes_from(self.basis_tree(),
1947
require_versioned=False, want_unversioned=True,
1948
specific_files=files)
1949
for unknown_file in unknown_nested_files:
1950
if unknown_file not in tree_delta.unversioned:
1951
tree_delta.unversioned.extend((unknown_file,))
1952
raise errors.BzrRemoveChangedFilesError(tree_delta)
1954
# Build inv_delta and delete files where applicable,
1955
# do this before any modifications to inventory.
1126
1956
for f in files:
1127
fid = inv.path2id(f)
1957
fid = self.path2id(f)
1129
# TODO: Perhaps make this just a warning, and continue?
1130
# This tends to happen when
1131
raise NotVersionedError(path=f)
1132
mutter("remove inventory entry %s {%s}", quotefn(f), fid)
1134
# having removed it, it must be either ignored or unknown
1135
if self.is_ignored(f):
1139
show_status(new_status, inv[fid].kind, quotefn(f))
1142
self._write_inventory(inv)
1145
def revert(self, filenames, old_tree=None, backups=True,
1146
pb=DummyProgress()):
1147
from transform import revert
1148
from conflicts import resolve
1960
message = "%s is not versioned." % (f,)
1963
# having removed it, it must be either ignored or unknown
1964
if self.is_ignored(f):
1968
textui.show_status(new_status, self.kind(fid), f,
1971
inv_delta.append((f, None, fid, None))
1972
message = "removed %s" % (f,)
1975
abs_path = self.abspath(f)
1976
if osutils.lexists(abs_path):
1977
if (osutils.isdir(abs_path) and
1978
len(os.listdir(abs_path)) > 0):
1980
osutils.rmtree(abs_path)
1982
message = "%s is not an empty directory "\
1983
"and won't be deleted." % (f,)
1985
osutils.delete_any(abs_path)
1986
message = "deleted %s" % (f,)
1987
elif message is not None:
1988
# Only care if we haven't done anything yet.
1989
message = "%s does not exist." % (f,)
1991
# Print only one message (if any) per file.
1992
if message is not None:
1994
self.apply_inventory_delta(inv_delta)
1996
@needs_tree_write_lock
1997
def revert(self, filenames=None, old_tree=None, backups=True,
1998
pb=DummyProgress(), report_changes=False):
1999
from bzrlib.conflicts import resolve
2002
symbol_versioning.warn('Using [] to revert all files is deprecated'
2003
' as of bzr 0.91. Please use None (the default) instead.',
2004
DeprecationWarning, stacklevel=2)
1149
2005
if old_tree is None:
1150
old_tree = self.basis_tree()
1151
conflicts = revert(self, old_tree, filenames, backups, pb)
1152
if not len(filenames):
1153
self.set_pending_merges([])
2006
basis_tree = self.basis_tree()
2007
basis_tree.lock_read()
2008
old_tree = basis_tree
1156
resolve(self, filenames, ignore_misses=True)
2012
conflicts = transform.revert(self, old_tree, filenames, backups, pb,
2014
if filenames is None and len(self.get_parent_ids()) > 1:
2016
last_revision = self.last_revision()
2017
if last_revision != NULL_REVISION:
2018
if basis_tree is None:
2019
basis_tree = self.basis_tree()
2020
basis_tree.lock_read()
2021
parent_trees.append((last_revision, basis_tree))
2022
self.set_parent_trees(parent_trees)
2025
resolve(self, filenames, ignore_misses=True, recursive=True)
2027
if basis_tree is not None:
1157
2029
return conflicts
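# Illustrative usage sketch (not in the original source): reverting a couple
# of files back to the basis tree.
#
#   wt.revert(['foo.c', 'bar.c'])      # revert two files
#   wt.revert()                        # filenames=None reverts everything
#
# As the deprecation warning above notes, passing [] to mean "all files" is
# deprecated as of bzr 0.91; use None instead.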
2031
def revision_tree(self, revision_id):
2032
"""See Tree.revision_tree.
2034
WorkingTree can supply revision_trees for the basis revision only
2035
because there is only one cached inventory in the bzr directory.
2037
if revision_id == self.last_revision():
2039
xml = self.read_basis_inventory()
2040
except errors.NoSuchFile:
2044
inv = xml7.serializer_v7.read_inventory_from_string(xml)
2045
# dont use the repository revision_tree api because we want
2046
# to supply the inventory.
2047
if inv.revision_id == revision_id:
2048
return revisiontree.RevisionTree(self.branch.repository,
2050
except errors.BadInventoryFormat:
2052
# raise if there was no inventory, or if we read the wrong inventory.
2053
raise errors.NoSuchRevisionInTree(self, revision_id)
1159
2055
# XXX: This method should be deprecated in favour of taking in a proper
1160
2056
# new Inventory object.
2057
@needs_tree_write_lock
1162
2058
def set_inventory(self, new_inventory_list):
1163
2059
from bzrlib.inventory import (Inventory,
1164
2060
InventoryDirectory,
1204
2126
between multiple working trees, i.e. via shared storage, then we
1205
2127
would probably want to lock both the local tree, and the branch.
1207
# FIXME: We want to write out the hashcache only when the last lock on
1208
# this working copy is released. Peeking at the lock count is a bit
1209
# of a nasty hack; probably it's better to have a transaction object,
1210
# which can do some finalization when it's either successfully or
1211
# unsuccessfully completed. (Denys's original patch did that.)
1212
# RBC 20060206 hooking into transaction will couple lock and transaction
# wrongly. Hooking into unlock on the control files object is fine though.
1215
# TODO: split this per format so there is no ugly if block
1216
if self._hashcache.needs_write and (
1217
# dedicated lock files
1218
self._control_files._lock_count==1 or
1220
(self._control_files is self.branch.control_files and
1221
self._control_files._lock_count==3)):
1222
self._hashcache.write()
1223
# reverse order of locking.
1224
result = self._control_files.unlock()
1226
self.branch.unlock()
2129
raise NotImplementedError(self.unlock)
2131
def update(self, change_reporter=None, possible_transports=None):
1232
2132
"""Update a working tree along its branch.
1234
This will update the branch if it is bound too, which means we have multiple trees involved:
1235
The new basis tree of the master.
1236
The old basis tree of the branch.
1237
The old basis tree of the working tree.
1238
The current working tree state.
1239
pathologically all three may be different, and non ancestors of each other.
1240
Conceptually we want to:
1241
Preserve the wt.basis->wt.state changes
1242
Transform the wt.basis to the new master basis.
1243
Apply a merge of the old branch basis to get any 'local' changes from it into the tree.
1244
Restore the wt.basis->wt.state changes.
2134
This will update the branch if it is bound too, which means we have
2135
multiple trees involved:
2137
- The new basis tree of the master.
2138
- The old basis tree of the branch.
2139
- The old basis tree of the working tree.
2140
- The current working tree state.
2142
Pathologically, all three may be different, and non-ancestors of each
2143
other. Conceptually we want to:
2145
- Preserve the wt.basis->wt.state changes
2146
- Transform the wt.basis to the new master basis.
2147
- Apply a merge of the old branch basis to get any 'local' changes from
2149
- Restore the wt.basis->wt.state changes.
1246
2151
There isn't a single operation at the moment to do that, so we:
1247
Merge current state -> basis tree of the master w.r.t. the old tree basis.
1248
Do a 'normal' merge of the old branch basis if it is relevant.
1250
old_tip = self.branch.update()
1251
if old_tip is not None:
1252
self.add_pending_merge(old_tip)
1253
self.branch.lock_read()
1256
if self.last_revision() != self.branch.last_revision():
1257
# merge tree state up to new branch tip.
1258
basis = self.basis_tree()
2152
- Merge current state -> basis tree of the master w.r.t. the old tree
2154
- Do a 'normal' merge of the old branch basis if it is relevant.
2156
if self.branch.get_master_branch(possible_transports) is not None:
2158
update_branch = True
2160
self.lock_tree_write()
2161
update_branch = False
2164
old_tip = self.branch.update(possible_transports)
2167
return self._update_tree(old_tip, change_reporter)
2171
@needs_tree_write_lock
2172
def _update_tree(self, old_tip=None, change_reporter=None):
2173
"""Update a tree to the master branch.
2175
:param old_tip: if supplied, the previous tip revision the branch,
2176
before it was changed to the master branch's tip.
2178
# here if old_tip is not None, it is the old tip of the branch before
2179
# it was updated from the master branch. This should become a pending
2180
# merge in the working tree to preserve the user existing work. we
2181
# cant set that until we update the working trees last revision to be
2182
# one from the new branch, because it will just get absorbed by the
2183
# parent de-duplication logic.
2185
# We MUST save it even if an error occurs, because otherwise the users
2186
# local work is unreferenced and will appear to have been lost.
2190
last_rev = self.get_parent_ids()[0]
2192
last_rev = _mod_revision.NULL_REVISION
2193
if last_rev != _mod_revision.ensure_null(self.branch.last_revision()):
2194
# merge tree state up to new branch tip.
2195
basis = self.basis_tree()
1259
2198
to_tree = self.branch.basis_tree()
1260
result += merge_inner(self.branch,
2199
if basis.inventory.root is None:
2200
self.set_root_id(to_tree.get_root_id())
2202
result += merge.merge_inner(
1264
self.set_last_revision(self.branch.last_revision())
1265
if old_tip and old_tip != self.last_revision():
1266
# our last revision was not the prior branch last revision
1267
# and we have converted that last revision to a pending merge.
1268
# base is somewhere between the branch tip now
1269
# and the now pending merge
1270
from bzrlib.revision import common_ancestor
1272
base_rev_id = common_ancestor(self.branch.last_revision(),
1274
self.branch.repository)
1275
except errors.NoCommonAncestor:
1277
base_tree = self.branch.repository.revision_tree(base_rev_id)
1278
other_tree = self.branch.repository.revision_tree(old_tip)
1279
result += merge_inner(self.branch,
1285
self.branch.unlock()
2207
change_reporter=change_reporter)
2210
# TODO - dedup parents list with things merged by pull ?
2211
# reuse the tree we've updated to to set the basis:
2212
parent_trees = [(self.branch.last_revision(), to_tree)]
2213
merges = self.get_parent_ids()[1:]
2214
# Ideally we ask the tree for the trees here, that way the working
2215
# tree can decide whether to give us the entire tree or give us a
2216
# lazy initialised tree. dirstate for instance will have the trees
2217
# in ram already, whereas a last-revision + basis-inventory tree
2218
# will not, but also does not need them when setting parents.
2219
for parent in merges:
2220
parent_trees.append(
2221
(parent, self.branch.repository.revision_tree(parent)))
2222
if (old_tip is not None and not _mod_revision.is_null(old_tip)):
2223
parent_trees.append(
2224
(old_tip, self.branch.repository.revision_tree(old_tip)))
2225
self.set_parent_trees(parent_trees)
2226
last_rev = parent_trees[0][0]
2228
# the working tree had the same last-revision as the master
2229
# branch did. We may still have pivot local work from the local
2230
# branch into old_tip:
2231
if (old_tip is not None and not _mod_revision.is_null(old_tip)):
2232
self.add_parent_tree_id(old_tip)
2233
if (old_tip is not None and not _mod_revision.is_null(old_tip)
2234
and old_tip != last_rev):
2235
# our last revision was not the prior branch last revision
2236
# and we have converted that last revision to a pending merge.
2237
# base is somewhere between the branch tip now
2238
# and the now pending merge
2240
# Since we just modified the working tree and inventory, flush out
2241
# the current state, before we modify it again.
2242
# TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
2243
# requires it only because TreeTransform directly munges the
2244
# inventory and calls tree._write_inventory(). Ultimately we
2245
# should be able to remove this extra flush.
2247
graph = self.branch.repository.get_graph()
2248
base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
2250
base_tree = self.branch.repository.revision_tree(base_rev_id)
2251
other_tree = self.branch.repository.revision_tree(old_tip)
2252
result += merge.merge_inner(
2257
change_reporter=change_reporter)
2260
def _write_hashcache_if_dirty(self):
2261
"""Write out the hashcache if it is dirty."""
2262
if self._hashcache.needs_write:
2264
self._hashcache.write()
2266
if e.errno not in (errno.EPERM, errno.EACCES):
2268
# TODO: jam 20061219 Should this be a warning? A single line
2269
# warning might be sufficient to let the user know what
2271
mutter('Could not write hashcache for %s\nError: %s',
2272
self._hashcache.cache_file_name(), e)
2274
@needs_tree_write_lock
1288
2275
def _write_inventory(self, inv):
1289
2276
"""Write inventory as the current inventory."""
1291
bzrlib.xml5.serializer_v5.write_inventory(inv, sio)
1293
self._control_files.put('inventory', sio)
1294
self._set_inventory(inv)
1295
mutter('wrote working inventory')
2277
self._set_inventory(inv, dirty=True)
1297
2280
def set_conflicts(self, arg):
1298
raise UnsupportedOperation(self.set_conflicts, self)
2281
raise errors.UnsupportedOperation(self.set_conflicts, self)
2283
def add_conflicts(self, arg):
2284
raise errors.UnsupportedOperation(self.add_conflicts, self)
1300
2286
@needs_read_lock
1301
2287
def conflicts(self):
1302
conflicts = ConflictList()
2288
conflicts = _mod_conflicts.ConflictList()
1303
2289
for conflicted in self._iter_conflicts():
1306
2292
if file_kind(self.abspath(conflicted)) != "file":
1309
if e.errno == errno.ENOENT:
2294
except errors.NoSuchFile:
1313
2296
if text is True:
1314
2297
for suffix in ('.THIS', '.OTHER'):
1316
2299
kind = file_kind(self.abspath(conflicted+suffix))
1318
if e.errno == errno.ENOENT:
2302
except errors.NoSuchFile:
1326
2306
ctype = {True: 'text conflict', False: 'contents conflict'}[text]
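# the conflict is reported as a plain text conflict only when the working
# file and its .THIS/.OTHER siblings are all regular files; anything else
# is a contents conflict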
1327
conflicts.append(Conflict.factory(ctype, path=conflicted,
2307
conflicts.append(_mod_conflicts.Conflict.factory(ctype,
1328
2309
file_id=self.path2id(conflicted)))
1329
2310
return conflicts
2312
def walkdirs(self, prefix=""):
2313
"""Walk the directories of this tree.
2315
returns a generator which yields items in the form:
2316
((current_directory_path, fileid),
2317
[(file1_path, file1_name, file1_kind, (lstat), file1_id,
2320
This API returns a generator, which is only valid during the current
2321
tree transaction - within a single lock_read or lock_write duration.
2323
If the tree is not locked, it may cause an error to be raised,
2324
depending on the tree implementation.
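# Illustrative usage sketch (not part of the original source); assumes a
# working tree object named 'tree':
#   tree.lock_read()
#   try:
#       for (dir_relpath, dir_file_id), dirblock in tree.walkdirs():
#           for relpath, name, kind, lstat, file_id, versioned_kind in dirblock:
#               pass  # process each directory entry
#   finally:
#       tree.unlock()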
2326
disk_top = self.abspath(prefix)
2327
if disk_top.endswith('/'):
2328
disk_top = disk_top[:-1]
2329
top_strip_len = len(disk_top) + 1
2330
inventory_iterator = self._walkdirs(prefix)
2331
disk_iterator = osutils.walkdirs(disk_top, prefix)
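# the inventory and the on-disk directory listing are walked in parallel,
# advancing whichever side is behind until both are exhausted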
2333
current_disk = disk_iterator.next()
2334
disk_finished = False
2336
if not (e.errno == errno.ENOENT or
2337
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
2340
disk_finished = True
2342
current_inv = inventory_iterator.next()
2343
inv_finished = False
2344
except StopIteration:
2347
while not inv_finished or not disk_finished:
2349
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2350
cur_disk_dir_content) = current_disk
2352
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2353
cur_disk_dir_content) = ((None, None), None)
2354
if not disk_finished:
2355
# strip out .bzr dirs
2356
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
2357
len(cur_disk_dir_content) > 0):
2358
# osutils.walkdirs can be made nicer -
2359
# yield the path-from-prefix rather than the pathjoined
2361
bzrdir_loc = bisect_left(cur_disk_dir_content,
2363
if cur_disk_dir_content[bzrdir_loc][0] == '.bzr':
2364
# we don't yield the contents of .bzr, or .bzr itself.
2365
del cur_disk_dir_content[bzrdir_loc]
2367
# everything is unknown
2370
# everything is missing
2373
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
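# the branches below handle three cases: a directory present only on disk
# (unknown), only in the inventory (missing), or in both (merged)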
2375
# disk is before inventory - unknown
2376
dirblock = [(relpath, basename, kind, stat, None, None) for
2377
relpath, basename, kind, stat, top_path in
2378
cur_disk_dir_content]
2379
yield (cur_disk_dir_relpath, None), dirblock
2381
current_disk = disk_iterator.next()
2382
except StopIteration:
2383
disk_finished = True
2385
# inventory is before disk - missing.
2386
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
2387
for relpath, basename, dkind, stat, fileid, kind in
2389
yield (current_inv[0][0], current_inv[0][1]), dirblock
2391
current_inv = inventory_iterator.next()
2392
except StopIteration:
2395
# versioned present directory
2396
# merge the inventory and disk data together
2398
for relpath, subiterator in itertools.groupby(sorted(
2399
current_inv[1] + cur_disk_dir_content,
2400
key=operator.itemgetter(0)), operator.itemgetter(1)):
2401
path_elements = list(subiterator)
2402
if len(path_elements) == 2:
2403
inv_row, disk_row = path_elements
2404
# versioned, present file
2405
dirblock.append((inv_row[0],
2406
inv_row[1], disk_row[2],
2407
disk_row[3], inv_row[4],
2409
elif len(path_elements[0]) == 5:
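# a lone 5-tuple comes from the disk walk: present on disk, not versioned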
2411
dirblock.append((path_elements[0][0],
2412
path_elements[0][1], path_elements[0][2],
2413
path_elements[0][3], None, None))
2414
elif len(path_elements[0]) == 6:
2415
# versioned, absent file.
2416
dirblock.append((path_elements[0][0],
2417
path_elements[0][1], 'unknown', None,
2418
path_elements[0][4], path_elements[0][5]))
2420
raise NotImplementedError('unreachable code')
2421
yield current_inv[0], dirblock
2423
current_inv = inventory_iterator.next()
2424
except StopIteration:
2427
current_disk = disk_iterator.next()
2428
except StopIteration:
2429
disk_finished = True
2431
def _walkdirs(self, prefix=""):
2432
"""Walk the directories of this tree.
2434
:prefix: is used as the directory to start with.
2435
returns a generator which yields items in the form:
2436
((current_directory_path, fileid),
2437
[(file1_path, file1_name, file1_kind, None, file1_id,
2440
_directory = 'directory'
2441
# get the root in the inventory
2442
inv = self.inventory
2443
top_id = inv.path2id(prefix)
2447
pending = [(prefix, '', _directory, None, top_id, None)]
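# 'pending' is a stack of directory rows still to visit, giving an
# iterative depth-first walk over the inventory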
2450
currentdir = pending.pop()
2451
# 0 - relpath, 1 - basename, 2 - kind, 3 - stat, 4 - id, 5 - kind
2452
top_id = currentdir[4]
2454
relroot = currentdir[0] + '/'
2457
# FIXME: stash the node in pending
2459
for name, child in entry.sorted_children():
2460
dirblock.append((relroot + name, name, child.kind, None,
2461
child.file_id, child.kind
2463
yield (currentdir[0], entry.file_id), dirblock
2464
# push the user-specified dirs from dirblock
2465
for dir in reversed(dirblock):
2466
if dir[2] == _directory:
2469
@needs_tree_write_lock
2470
def auto_resolve(self):
2471
"""Automatically resolve text conflicts according to contents.
2473
Only text conflicts are auto_resolvable. Files with no conflict markers
2474
are considered 'resolved', because bzr always puts conflict markers
2475
into files that have text conflicts. The corresponding .THIS, .BASE and
2476
.OTHER files are deleted, as per 'resolve'.
2477
:return: a tuple of ConflictLists: (un_resolved, resolved).
2479
un_resolved = _mod_conflicts.ConflictList()
2480
resolved = _mod_conflicts.ConflictList()
2481
conflict_re = re.compile('^(<{7}|={7}|>{7})')
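# matches lines starting with the seven-character conflict markers
# '<<<<<<<', '=======' or '>>>>>>>'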
2482
for conflict in self.conflicts():
2483
if (conflict.typestring != 'text conflict' or
2484
self.kind(conflict.file_id) != 'file'):
2485
un_resolved.append(conflict)
2487
my_file = open(self.id2abspath(conflict.file_id), 'rb')
2489
for line in my_file:
2490
if conflict_re.search(line):
2491
un_resolved.append(conflict)
2494
resolved.append(conflict)
2497
resolved.remove_files(self)
2498
self.set_conflicts(un_resolved)
2499
return un_resolved, resolved
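# Illustrative usage (not from the original source), assuming a writable
# working tree object named 'tree':
#   un_resolved, resolved = tree.auto_resolve()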
2501
def _validate(self):
2502
"""Validate internal structures.
2504
This is meant mostly for the test suite, to give it a chance to detect
2505
corruption after actions have occurred. The default implementation is a
2508
:return: None. An exception should be raised if there is an error.
2513
class WorkingTree2(WorkingTree):
2514
"""This is the Format 2 working tree.
2516
This was the first weave-based working tree.
2517
- uses OS locks for locking.
2518
- uses the branch last-revision.
2521
def __init__(self, *args, **kwargs):
2522
super(WorkingTree2, self).__init__(*args, **kwargs)
2523
# WorkingTree2 has the additional constraint that self._inventory must
2524
# exist. Because this is an older format, we don't mind the overhead
2525
# caused by the extra computation here.
2527
# Newer WorkingTrees should only have self._inventory set when they
2529
if self._inventory is None:
2530
self.read_working_inventory()
2532
def lock_tree_write(self):
2533
"""See WorkingTree.lock_tree_write().
2535
In Format2 WorkingTrees we have a single lock for the branch and tree
2536
so lock_tree_write() degrades to lock_write().
2538
self.branch.lock_write()
2540
return self._control_files.lock_write()
2542
self.branch.unlock()
2546
# do non-implementation specific cleanup
2549
# we share control files:
2550
if self._control_files._lock_count == 3:
2551
# _inventory_is_modified is always False during a read lock.
2552
if self._inventory_is_modified:
2554
self._write_hashcache_if_dirty()
2556
# reverse order of locking.
2558
return self._control_files.unlock()
2560
self.branch.unlock()
1332
2563
class WorkingTree3(WorkingTree):
1333
2564
"""This is the Format 3 working tree.