        inv = self._inventory
        for path, ie in inv.iter_entries():
            if osutils.lexists(self.abspath(path)):
                yield ie.file_id

    def all_file_ids(self):
        """See Tree.iter_all_file_ids"""
        return set(self.inventory)
    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__,
                               getattr(self, 'basedir', None))

    def abspath(self, filename):
        return pathjoin(self.basedir, filename)
    def basis_tree(self):
        """Return RevisionTree for the current last revision.

        If the left most parent is a ghost then the returned tree will be an
        empty tree - one obtained by calling
        repository.revision_tree(NULL_REVISION).
        """
        try:
            revision_id = self.get_parent_ids()[0]
        except IndexError:
            # no parents, return an empty revision tree.
            # in the future this should return the tree for
            # 'empty:' - the implicit root empty tree.
            return self.branch.repository.revision_tree(
                _mod_revision.NULL_REVISION)
        try:
            return self.revision_tree(revision_id)
        except errors.NoSuchRevision:
            pass
        # No cached copy available, retrieve from the repository.
        # FIXME? RBC 20060403 should we cache the inventory locally
        try:
            return self.branch.repository.revision_tree(revision_id)
        except (errors.RevisionNotPresent, errors.NoSuchRevision):
            # the basis tree *may* be a ghost or a low level error may have
            # occurred. If the revision is present, its a problem, if its not
            if self.branch.repository.has_revision(revision_id):
                raise
            # the basis tree is a ghost so return an empty tree.
            return self.branch.repository.revision_tree(
                _mod_revision.NULL_REVISION)
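    # Illustrative sketch, not part of bzrlib: a typical caller compares the
    # working tree against the tree returned by basis_tree().  'wt' is assumed
    # to be an already opened WorkingTree.
    #
    #   basis = wt.basis_tree()
    #   basis.lock_read()
    #   try:
    #       delta = wt.changes_from(basis)
    #   finally:
    #       basis.unlock()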
        self._flush_ignore_list_cache()
    def relpath(self, path):
        """Return the local path portion from a given path.

        The path may be absolute or relative. If its a relative path it is
        interpreted relative to the python current working directory.
        """
        return osutils.relpath(self.basedir, path)

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))
    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

    def get_file_with_stat(self, file_id, path=None, filtered=True,
                           _fstat=os.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        return self.get_file(file_id, path=path, filtered=filtered).read()

    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()
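    # Illustrative sketch, not part of bzrlib: reading a versioned file's
    # content through the accessors above.  'wt' is assumed to be a
    # read-locked WorkingTree and 'a_file_id' a versioned file id.
    #
    #   text = wt.get_file_text(a_file_id)
    #   f, stat_value = wt.get_file_with_stat(a_file_id)
    #   try:
    #       lines = f.readlines()
    #   finally:
    #       f.close()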
    def annotate_iter(self, file_id, default_revision=CURRENT_REVISION):
        """See Tree.annotate_iter

        This implementation will use the basis tree implementation if possible.
        Lines not in the basis are attributed to CURRENT_REVISION

        If there are pending merges, lines added by those merges will be
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        maybe_file_parent_keys = []
        for parent_id in self.get_parent_ids():
            try:
                parent_tree = self.revision_tree(parent_id)
            except errors.NoSuchRevisionInTree:
                parent_tree = self.branch.repository.revision_tree(parent_id)
            parent_tree.lock_read()
            try:
                if file_id not in parent_tree:
                    continue
                ie = parent_tree.inventory[file_id]
                if ie.kind != 'file':
                    # Note: this is slightly unnecessary, because symlinks and
                    # directories have a "text" which is the empty text, and we
                    # know that won't mess up annotations. But it seems cleaner
                    continue
                parent_text_key = (file_id, ie.revision)
                if parent_text_key not in maybe_file_parent_keys:
                    maybe_file_parent_keys.append(parent_text_key)
            finally:
                parent_tree.unlock()
        graph = _mod_graph.Graph(self.branch.repository.texts)
        heads = graph.heads(maybe_file_parent_keys)
        file_parent_keys = []
        for key in maybe_file_parent_keys:
            if key in heads:
                file_parent_keys.append(key)

        # Now we have the parents of this content
        annotator = self.branch.repository.texts.get_annotator()
        text = self.get_file(file_id).read()
        this_key = (file_id, default_revision)
        annotator.add_special_text(this_key, file_parent_keys, text)
        annotations = [(key[-1], line)
                       for key, line in annotator.annotate_flat(this_key)]
        return annotations
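    # Illustrative sketch, not part of bzrlib: annotate_iter() returns
    # (revision_id, line) pairs; lines that only exist in the working tree
    # come back attributed to default_revision (CURRENT_REVISION).
    #
    #   for revision_id, line in wt.annotate_iter(a_file_id):
    #       print revision_id, line,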
    def _get_ancestors(self, default_revision):
        ancestors = set([default_revision])
        for parent_id in self.get_parent_ids():
            ancestors.update(self.branch.repository.get_ancestry(
                parent_id, topo_sorted=False))
        return ancestors

    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation reads the pending merges list and last_revision
        value and uses that to decide what the parents list should be.
        """
        last_rev = _mod_revision.ensure_null(self._last_revision())
        if _mod_revision.NULL_REVISION == last_rev:
            parents = []
        else:
            parents = [last_rev]
        try:
            merges_file = self._transport.get('pending-merges')
        except errors.NoSuchFile:
            pass
        else:
            for l in merges_file.readlines():
                revision_id = l.rstrip('\n')
                parents.append(revision_id)
        return parents
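    # Illustrative sketch, not part of bzrlib: the first parent id is the
    # basis revision; any further ids are pending merges read from the
    # 'pending-merges' control file (one revision id per line).
    #
    #   parents = wt.get_parent_ids()
    #   if len(parents) > 1:
    #       print 'pending merges:', parents[1:]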
    def get_root_id(self):
        """Return the id of this trees root"""
        return self._inventory.root.file_id

    def _get_store_filename(self, file_id):
        ## XXX: badly named; this is not in the store at all
        return self.abspath(self.id2path(file_id))
    def clone(self, to_bzrdir, revision_id=None):
        """Duplicate this working tree into to_bzr, including all state.

        Specifically modified files are kept as modified, but
        ignored and unknown files are discarded.

        If you want to make a new line of development, see bzrdir.sprout()

        If not None, the cloned tree will have its last revision set to
        revision, and difference between the source trees last revision
        and this one merged in.
        """
        # assumes the target bzr dir format is compatible.
        result = to_bzrdir.create_workingtree()
        self.copy_content_into(result, revision_id)
        return result

    def copy_content_into(self, tree, revision_id=None):
        """Copy the current content and user files of this tree into tree."""
        tree.set_root_id(self.get_root_id())
        if revision_id is None:
            merge.transform_tree(tree, self)
        else:
            # TODO now merge from tree.last_revision to revision (to preserve
            # user local changes)
            merge.transform_tree(tree, self)
            tree.set_parent_ids([revision_id])

    def id2abspath(self, file_id):
        return self.abspath(self.id2path(file_id))
    def has_id(self, file_id):
        # files that have been deleted are excluded
        inv = self.inventory
        if not inv.has_id(file_id):
            return False
        path = inv.id2path(file_id)
        return osutils.lexists(self.abspath(path))

    def has_or_had_id(self, file_id):
        if file_id == self.inventory.root.file_id:
            return True
        return self.inventory.has_id(file_id)

    __contains__ = has_id

    def get_file_size(self, file_id):
        """See Tree.get_file_size"""
        # XXX: this returns the on-disk size; it should probably return the
        try:
            return os.path.getsize(self.id2abspath(file_id))
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            else:
                return None
    def get_file_sha1(self, file_id, path=None, stat_value=None):
        if not path:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path, stat_value)

    def get_file_mtime(self, file_id, path=None):
        if not path:
            path = self.inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        file_id = self.path2id(path)
        if file_id is None:
            # For unversioned files on win32, we just assume they are not
            # executable
            return False
        return self._inventory[file_id].executable

    def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
        mode = stat_result.st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_basis
    else:
        def is_executable(self, file_id, path=None):
            if not path:
                path = self.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

        _is_executable_from_path_and_stat = \
            _is_executable_from_path_and_stat_from_stat

    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        # TODO: Re-adding a file that is removed in the working copy
        # should probably put it back with the previous ID.
        # the read and write working inventory should not occur in this
        # function - they should be part of lock_write and unlock.
        inv = self.inventory
        for f, file_id, kind in zip(files, ids, kinds):
            if file_id is None:
                inv.add_path(f, kind=kind)
            else:
                inv.add_path(f, kind=kind, file_id=file_id)
        self._inventory_is_modified = True
    @needs_tree_write_lock
    def _gather_kinds(self, files, kinds):
        """See MutableTree._gather_kinds."""
        for pos, f in enumerate(files):
            if kinds[pos] is None:
                fullpath = normpath(self.abspath(f))
                try:
                    kinds[pos] = file_kind(fullpath)
                except OSError, e:
                    if e.errno == errno.ENOENT:
                        raise errors.NoSuchFile(fullpath)
    def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
        """Add revision_id as a parent.

        This is equivalent to retrieving the current list of parent ids
        and setting the list to its value plus revision_id.

        :param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as its not the first parent to be added,
            or the allow_leftmost_as_ghost parameter is set True.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parents = self.get_parent_ids() + [revision_id]
        self.set_parent_ids(parents, allow_leftmost_as_ghost=len(parents) > 1
            or allow_leftmost_as_ghost)
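    # Illustrative sketch, not part of bzrlib: recording a merged revision as
    # an extra parent of the tree; 'merged_rev_id' is assumed to be present in
    # the repository (or to be an allowed ghost).
    #
    #   wt.lock_tree_write()
    #   try:
    #       wt.add_parent_tree_id(merged_rev_id)
    #   finally:
    #       wt.unlock()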
    @needs_tree_write_lock
    def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
        """Add revision_id, tree tuple as a parent.

        This is equivalent to retrieving the current list of parent trees
        and setting the list to its value plus parent_tuple. See also
        add_parent_tree_id - if you only have a parent id available it will be
        simpler to use that api. If you have the parent already available, using
        this api is preferred.

        :param parent_tuple: The (revision id, tree) to add to the parent list.
            If the revision_id is a ghost, pass None for the tree.
        :param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
        """
        parent_ids = self.get_parent_ids() + [parent_tuple[0]]
        if len(parent_ids) > 1:
            # the leftmost may have already been a ghost, preserve that if it
            allow_leftmost_as_ghost = True
        self.set_parent_ids(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
    @needs_tree_write_lock
    def add_pending_merge(self, *revision_ids):
        # TODO: Perhaps should check at this point that the
        # history of the revision is actually present?
        parents = self.get_parent_ids()
        updated = False
        for rev_id in revision_ids:
            if rev_id in parents:
                continue
            parents.append(rev_id)
            updated = True
        if updated:
            self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
    def path_content_summary(self, path, _lstat=os.lstat,
        _mapper=osutils.file_kind_from_stat_mode):
        """See Tree.path_content_summary."""
        abspath = self.abspath(path)
        try:
            stat_result = _lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                # no file.
                return ('missing', None, None, None)
            # propagate other errors
            raise
        kind = _mapper(stat_result.st_mode)
        if kind == 'file':
            return self._file_content_summary(path, stat_result)
        elif kind == 'directory':
            # perhaps it looks like a plain directory, but it's really a
            # reference.
            if self._directory_is_tree_reference(path):
                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            target = osutils.readlink(abspath)
            return ('symlink', None, None, target)
        else:
            return (kind, None, None, None)
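    # Illustrative sketch, not part of bzrlib: the 4-tuple returned above is
    # (kind, size, executable, detail); only regular files carry a size and
    # executable flag, symlinks carry their target as the detail field.
    #
    #   kind, size, executable, detail = wt.path_content_summary('README')
    #   if kind == 'file':
    #       print '%d bytes, executable=%s' % (size, executable)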
    def _file_content_summary(self, path, stat_result):
        size = stat_result.st_size
        executable = self._is_executable_from_path_and_stat(path, stat_result)
        # try for a stat cache lookup
        return ('file', size, executable, self._sha_from_stat(
            path, stat_result))
    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

        This checks that the left hand-parent exists if there are any
        revisions present.
        """
        if len(revision_ids) > 0:
            leftmost_id = revision_ids[0]
            if (not allow_leftmost_as_ghost and not
                self.branch.repository.has_revision(leftmost_id)):
                raise errors.GhostRevisionUnusableHere(leftmost_id)

    def _set_merges_from_parent_ids(self, parent_ids):
        merges = parent_ids[1:]
        self._transport.put_bytes('pending-merges', '\n'.join(merges),
            mode=self.bzrdir._get_file_mode())
    def _filter_parent_ids_by_ancestry(self, revision_ids):
        """Check that all merged revisions are proper 'heads'.

        This will always return the first revision_id, and any merged revisions
        which are heads of the requested revisions.
        """
        if len(revision_ids) == 0:
            return revision_ids
        graph = self.branch.repository.get_graph()
        heads = graph.heads(revision_ids)
        new_revision_ids = revision_ids[:1]
        for revision_id in revision_ids[1:]:
            if revision_id in heads and revision_id not in new_revision_ids:
                new_revision_ids.append(revision_id)
        if new_revision_ids != revision_ids:
            trace.mutter('requested to set revision_ids = %s,'
                         ' but filtered to %s', revision_ids, new_revision_ids)
        return new_revision_ids
    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        self._check_parents_for_ghosts(revision_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        for revision_id in revision_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        revision_ids = self._filter_parent_ids_by_ancestry(revision_ids)

        if len(revision_ids) > 0:
            self.set_last_revision(revision_ids[0])
        else:
            self.set_last_revision(_mod_revision.NULL_REVISION)

        self._set_merges_from_parent_ids(revision_ids)
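    # Illustrative sketch, not part of bzrlib: set_parent_ids() is the
    # id-only convenience; when the parent trees are already in hand,
    # set_parent_trees() below avoids re-reading them from the repository.
    #
    #   wt.set_parent_ids([rev_a, rev_b])
    #   wt.set_parent_trees([(rev_a, tree_a), (rev_b, tree_b)])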
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """See MutableTree.set_parent_trees."""
        parent_ids = [rev for (rev, tree) in parents_list]
        for revision_id in parent_ids:
            _mod_revision.check_not_reserved_id(revision_id)

        self._check_parents_for_ghosts(parent_ids,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)

        parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)

        if len(parent_ids) == 0:
            leftmost_parent_id = _mod_revision.NULL_REVISION
            leftmost_parent_tree = None
        else:
            leftmost_parent_id, leftmost_parent_tree = parents_list[0]

        if self._change_last_revision(leftmost_parent_id):
            if leftmost_parent_tree is None:
                # If we don't have a tree, fall back to reading the
                # parent tree from the repository.
                self._cache_basis_inventory(leftmost_parent_id)
            else:
                inv = leftmost_parent_tree.inventory
                xml = self._create_basis_xml_from_inventory(
                                        leftmost_parent_id, inv)
                self._write_basis_inventory(xml)
        self._set_merges_from_parent_ids(parent_ids)
    @needs_tree_write_lock
    def set_pending_merges(self, rev_list):
        parents = self.get_parent_ids()
        leftmost = parents[:1]
        new_parents = leftmost + rev_list
        self.set_parent_ids(new_parents)

    @needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id.decode('utf8'), hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
    def _sha_from_stat(self, path, stat_result):
        """Get a sha digest from the tree's stat cache.

        The default implementation assumes no stat cache is present.

        :param path: The path.
        :param stat_result: The stat result being looked up.
        """
        return None

    def _put_rio(self, filename, stanzas, header):
        self._must_be_locked()
        my_file = rio_file(stanzas, header)
        self._transport.put_file(filename, my_file,
            mode=self.bzrdir._get_file_mode())
    @needs_write_lock # because merge pulls data into the branch.
    def merge_from_branch(self, branch, to_revision=None, from_revision=None,
                          merge_type=None, force=False):
        """Merge from a branch into this working tree.

        :param branch: The branch to merge from.
        :param to_revision: If non-None, the merge will merge to to_revision,
            but not beyond it. to_revision does not need to be in the history
            of the branch when it is supplied. If None, to_revision defaults to
            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        pb = ui.ui_factory.nested_progress_bar()
        merger = Merger(self.branch, this_tree=self, pb=pb)
        merger.pp = ProgressPhase("Merge phase", 5, pb)
        merger.pp.next_phase()
        # check that there are no local alterations
        if not force and self.has_changes():
            raise errors.UncommittedChanges(self)
        if to_revision is None:
            to_revision = _mod_revision.ensure_null(branch.last_revision())
        merger.other_rev_id = to_revision
        if _mod_revision.is_null(merger.other_rev_id):
            raise errors.NoCommits(branch)
        self.branch.fetch(branch, last_revision=merger.other_rev_id)
        merger.other_basis = merger.other_rev_id
        merger.other_tree = self.branch.repository.revision_tree(
            merger.other_rev_id)
        merger.other_branch = branch
        merger.pp.next_phase()
        if from_revision is None:
            merger.find_base()
        else:
            merger.set_base_revision(from_revision, branch)
        if merger.base_rev_id == merger.other_rev_id:
            raise errors.PointlessMerge
        merger.backup_files = False
        if merge_type is None:
            merger.merge_type = Merge3Merger
        else:
            merger.merge_type = merge_type
        merger.set_interesting_files(None)
        merger.show_base = False
        merger.reprocess = False
        conflicts = merger.do_merge()
        return conflicts
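    # Illustrative sketch, not part of bzrlib: merging another branch's tip
    # into this working tree; 'other_branch' is assumed to be an already
    # opened Branch object.
    #
    #   conflicts = wt.merge_from_branch(other_branch)
    #   if conflicts:
    #       print '%d conflicts to resolve before committing' % conflicts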
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        This returns a map of file_id->sha1, containing only files which are
        still in the working inventory and have that text hash.
        """
        try:
            hashfile = self._transport.get('merge-hashes')
        except errors.NoSuchFile:
            return {}
        merge_hashes = {}
        try:
            if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
                raise errors.MergeModifiedFormatError()
        except StopIteration:
            raise errors.MergeModifiedFormatError()
        for s in RioReader(hashfile):
            # RioReader reads in Unicode, so convert file_ids back to utf8
            file_id = osutils.safe_file_id(s.get("file_id"), warn=False)
            if file_id not in self.inventory:
                continue
            text_hash = s.get("hash")
            if text_hash == self.get_file_sha1(file_id):
                merge_hashes[file_id] = text_hash
        return merge_hashes
    def mkdir(self, path, file_id=None):
        """See MutableTree.mkdir()."""
        if file_id is None:
            file_id = generate_ids.gen_file_id(os.path.basename(path))
        os.mkdir(self.abspath(path))
        self.add(path, file_id, 'directory')
        return file_id

    def get_symlink_target(self, file_id):
        abspath = self.id2abspath(file_id)
        target = osutils.readlink(abspath)
        return target
    def subsume(self, other_tree):
        def add_children(inventory, entry):
            for child_entry in entry.children.values():
                inventory._byid[child_entry.file_id] = child_entry
                if child_entry.kind == 'directory':
                    add_children(inventory, child_entry)
        if other_tree.get_root_id() == self.get_root_id():
            raise errors.BadSubsumeSource(self, other_tree,
                                          'Trees have the same root')
        try:
            other_tree_path = self.relpath(other_tree.basedir)
        except errors.PathNotChild:
            raise errors.BadSubsumeSource(self, other_tree,
                'Tree is not contained by the other')
        new_root_parent = self.path2id(osutils.dirname(other_tree_path))
        if new_root_parent is None:
            raise errors.BadSubsumeSource(self, other_tree,
                'Parent directory is not versioned.')
        # We need to ensure that the result of a fetch will have a
        # versionedfile for the other_tree root, and only fetching into
        # RepositoryKnit2 guarantees that.
        if not self.branch.repository.supports_rich_root():
            raise errors.SubsumeTargetNeedsUpgrade(other_tree)
        other_tree.lock_tree_write()
        try:
            new_parents = other_tree.get_parent_ids()
            other_root = other_tree.inventory.root
            other_root.parent_id = new_root_parent
            other_root.name = osutils.basename(other_tree_path)
            self.inventory.add(other_root)
            add_children(self.inventory, other_root)
            self._write_inventory(self.inventory)
            # normally we don't want to fetch whole repositories, but i think
            # here we really do want to consolidate the whole thing.
            for parent_id in other_tree.get_parent_ids():
                self.branch.fetch(other_tree.branch, parent_id)
                self.add_parent_tree_id(parent_id)
        finally:
            other_tree.unlock()
        other_tree.bzrdir.retire_bzrdir()
    def _setup_directory_is_tree_reference(self):
        if self._branch.repository._format.supports_tree_reference:
            self._directory_is_tree_reference = \
                self._directory_may_be_tree_reference
        else:
            self._directory_is_tree_reference = \
                self._directory_is_never_tree_reference

    def _directory_is_never_tree_reference(self, relpath):
        return False

    def _directory_may_be_tree_reference(self, relpath):
        # as a special case, if a directory contains control files then
        # it's a tree reference, except that the root of the tree is not
        return relpath and osutils.isdir(self.abspath(relpath) + u"/.bzr")
        # TODO: We could ask all the control formats whether they
        # recognize this directory, but at the moment there's no cheap api
        # to do that. Since we probably can only nest bzr checkouts and
        # they always use this name it's ok for now. -- mbp 20060306
        #
        # FIXME: There is an unhandled case here of a subdirectory
        # containing .bzr but not a branch; that will probably blow up
        # when you try to commit it. It might happen if there is a
        # checkout in a subdirectory. This can be avoided by not adding
    @needs_tree_write_lock
    def extract(self, file_id, format=None):
        """Extract a subtree from this tree.

        A new branch will be created, relative to the path for this tree.
        """
        def mkdirs(path):
            segments = osutils.splitpath(path)
            transport = self.branch.bzrdir.root_transport
            for name in segments:
                transport = transport.clone(name)
                transport.ensure_base()
            return transport

        sub_path = self.id2path(file_id)
        branch_transport = mkdirs(sub_path)
        if format is None:
            format = self.bzrdir.cloning_metadir()
        branch_transport.ensure_base()
        branch_bzrdir = format.initialize_on_transport(branch_transport)
        try:
            repo = branch_bzrdir.find_repository()
        except errors.NoRepositoryPresent:
            repo = branch_bzrdir.create_repository()
        if not repo.supports_rich_root():
            raise errors.RootNotRich()
        new_branch = branch_bzrdir.create_branch()
        new_branch.pull(self.branch)
        for parent_id in self.get_parent_ids():
            new_branch.fetch(self.branch, parent_id)
        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir, new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = inventory.Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None
        child_inv.add(new_root)
        self._write_inventory(my_inv)
        wt._write_inventory(child_inv)
        return wt
    def _serialize(self, inventory, out_file):
        xml5.serializer_v5.write_inventory(self._inventory, out_file,
            working=True)

    def _deserialize(self, in_file):
        return xml5.serializer_v5.read_inventory(in_file)

    def flush(self):
        """Write the in memory inventory to disk."""
        # TODO: Maybe this should only write on dirty ?
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        sio = StringIO()
        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._transport.put_file('inventory', sio,
            mode=self.bzrdir._get_file_mode())
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))
    def list_files(self, include_root=False, from_dir=None, recursive=True):
        """List all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.
        This does not include files that have been deleted in this
        tree. Skips the control directory.

        :param include_root: if True, do not return an entry for the root
        :param from_dir: start from this directory or None for the root
        :param recursive: whether to recurse into subdirectories or not
        """
        # list_files is an iterator, so @needs_read_lock doesn't work properly
        # with it. So callers should be careful to always read_lock the tree.
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)

        inv = self.inventory
        if from_dir is None and include_root is True:
            yield ('', 'V', 'directory', inv.root.file_id, inv.root)
        # Convert these into local objects to save lookup times
        pathjoin = osutils.pathjoin
        file_kind = self._kind

        # transport.base ends in a slash, we want the piece
        # between the last two slashes
        transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]

        fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}

        # directory file_id, relative path, absolute path, reverse sorted children
        if from_dir is not None:
            from_dir_id = inv.path2id(from_dir)
            if from_dir_id is None:
                # Directory not versioned
                return
            from_dir_abspath = pathjoin(self.basedir, from_dir)
        else:
            from_dir_id = inv.root.file_id
            from_dir_abspath = self.basedir
        children = os.listdir(from_dir_abspath)
        children.sort()
        # jam 20060527 The kernel sized tree seems equivalent whether we
        # use a deque and popleft to keep them sorted, or if we use a plain
        # list and just reverse() them.
        children = collections.deque(children)
        stack = [(from_dir_id, u'', from_dir_abspath, children)]
        while stack:
            from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]

            while children:
                f = children.popleft()
                ## TODO: If we find a subdirectory with its own .bzr
                ## directory, then that is a separate tree and we
                ## should exclude it.

                # the bzrdir for this tree
                if transport_base_dir == f:
                    continue

                # we know that from_dir_relpath and from_dir_abspath never end in a slash
                # and 'f' doesn't begin with one, we can do a string op, rather
                # than the checks of pathjoin(), all relative paths will have an extra slash
                fp = from_dir_relpath + '/' + f

                fap = from_dir_abspath + '/' + f

                f_ie = inv.get_child(from_dir_id, f)
                if f_ie:
                    c = 'V'
                elif self.is_ignored(fp[1:]):
                    c = 'I'
                else:
                    # we may not have found this file, because of a unicode issue
                    f_norm, can_access = osutils.normalized_filename(f)
                    if f == f_norm or not can_access:
                        # No change, so treat this file normally
                        c = '?'
                    else:
                        # this file can be accessed by a normalized path
                        # check again if it is versioned
                        # these lines are repeated here for performance
                        f = f_norm
                        fp = from_dir_relpath + '/' + f
                        fap = from_dir_abspath + '/' + f
                        f_ie = inv.get_child(from_dir_id, f)
                        if f_ie:
                            c = 'V'
                        elif self.is_ignored(fp[1:]):
                            c = 'I'
                        else:
                            c = '?'

                fk = file_kind(fap)

                # make a last minute entry
                if f_ie:
                    yield fp[1:], c, fk, f_ie.file_id, f_ie
                else:
                    try:
                        yield fp[1:], c, fk, None, fk_entries[fk]()
                    except KeyError:
                        yield fp[1:], c, fk, None, TreeEntry()
                    continue

                if fk != 'directory':
                    continue

                # But do this child first if recursing down
                if recursive:
                    new_children = os.listdir(fap)
                    new_children.sort()
                    new_children = collections.deque(new_children)
                    stack.append((f_ie.file_id, fp, fap, new_children))
                    # Break out of inner loop,
                    # so that we start outer loop with child
                    break
            else:
                # if we finished all children, pop it off the stack
                stack.pop()
    @needs_tree_write_lock
    def move(self, from_paths, to_dir=None, after=False, **kwargs):
        """to_dir must exist in the inventory.

        If to_dir exists and is a directory, the files are moved into
        it, keeping their old names.

        Note that to_dir is only the last component of the new name;
        this doesn't change the directory.

        For each entry in from_paths the move mode will be determined
        independently.

        The first mode moves the file in the filesystem and updates the
        inventory. The second mode only updates the inventory without
        touching the file on the filesystem. This is the new mode introduced
        in version 0.15.

        move uses the second mode if 'after == True' and the target is not
        versioned but present in the working tree.

        move uses the second mode if 'after == False' and the source is
        versioned but no longer in the working tree, and the target is not
        versioned but present in the working tree.

        move uses the first mode if 'after == False' and the source is
        versioned and present in the working tree, and the target is not
        versioned and not present in the working tree.

        Everything else results in an error.

        This returns a list of (from_path, to_path) pairs for each
        entry that is moved.
        """
        rename_entries = []
        rename_tuples = []

        # check for deprecated use of signature
        if to_dir is None:
            to_dir = kwargs.get('to_name', None)
            if to_dir is None:
                raise TypeError('You must supply a target directory')
            else:
                symbol_versioning.warn('The parameter to_name was deprecated'
                                       ' in version 0.13. Use to_dir instead',
                                       DeprecationWarning)

        # check destination directory
        if isinstance(from_paths, basestring):
            raise ValueError()
        inv = self.inventory
        to_abs = self.abspath(to_dir)
        if not isdir(to_abs):
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotADirectory(to_abs))
        if not self.has_filename(to_dir):
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotInWorkingDirectory(to_dir))
        to_dir_id = inv.path2id(to_dir)
        if to_dir_id is None:
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotVersionedError(path=str(to_dir)))

        to_dir_ie = inv[to_dir_id]
        if to_dir_ie.kind != 'directory':
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotADirectory(to_abs))

        # create rename entries and tuples
        for from_rel in from_paths:
            from_tail = splitpath(from_rel)[-1]
            from_id = inv.path2id(from_rel)
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel,to_dir,
                    errors.NotVersionedError(path=str(from_rel)))

            from_entry = inv[from_id]
            from_parent_id = from_entry.parent_id
            to_rel = pathjoin(to_dir, from_tail)
            rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                         from_id=from_id,
                                         from_tail=from_tail,
                                         from_parent_id=from_parent_id,
                                         to_rel=to_rel, to_tail=from_tail,
                                         to_parent_id=to_dir_id)
            rename_entries.append(rename_entry)
            rename_tuples.append((from_rel, to_rel))

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        original_modified = self._inventory_is_modified
        try:
            self._inventory_is_modified = True
            self._move(rename_entries)
        except:
            # restore the inventory on error
            self._inventory_is_modified = original_modified
            raise
        self._write_inventory(inv)
        return rename_tuples
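    # Illustrative sketch, not part of bzrlib: moving several versioned files
    # into an already-versioned directory; the return value pairs old and new
    # paths.
    #
    #   for old_path, new_path in wt.move(['a.txt', 'b.txt'], 'docs'):
    #       print '%s => %s' % (old_path, new_path)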
    def _determine_mv_mode(self, rename_entries, after=False):
        """Determines for each from-to pair if both inventory and working tree
        or only the inventory has to be changed.

        Also does basic plausability tests.
        """
        inv = self.inventory

        for rename_entry in rename_entries:
            # store to local variables for easier reference
            from_rel = rename_entry.from_rel
            from_id = rename_entry.from_id
            to_rel = rename_entry.to_rel
            to_id = inv.path2id(to_rel)
            only_change_inv = False

            # check the inventory for source and destination
            if from_id is None:
                raise errors.BzrMoveFailedError(from_rel,to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            if to_id is not None:
                raise errors.BzrMoveFailedError(from_rel,to_rel,
                    errors.AlreadyVersionedError(path=str(to_rel)))

            # try to determine the mode for rename (only change inv or change
            # inv and file system)
            if after:
                if not self.has_filename(to_rel):
                    raise errors.BzrMoveFailedError(from_id,to_rel,
                        errors.NoSuchFile(path=str(to_rel),
                        extra="New file has not been created yet"))
                only_change_inv = True
            elif not self.has_filename(from_rel) and self.has_filename(to_rel):
                only_change_inv = True
            elif self.has_filename(from_rel) and not self.has_filename(to_rel):
                only_change_inv = False
            elif (not self.case_sensitive
                  and from_rel.lower() == to_rel.lower()
                  and self.has_filename(from_rel)):
                only_change_inv = False
            else:
                # something is wrong, so lets determine what exactly
                if not self.has_filename(from_rel) and \
                   not self.has_filename(to_rel):
                    raise errors.BzrRenameFailedError(from_rel,to_rel,
                        errors.PathsDoNotExist(paths=(str(from_rel),
                        str(to_rel))))
                else:
                    raise errors.RenameFailedFilesExist(from_rel, to_rel)
            rename_entry.only_change_inv = only_change_inv
        return rename_entries
    def _move(self, rename_entries):
        """Moves a list of files.

        Depending on the value of the flag 'only_change_inv', the
        file will be moved on the file system or not.
        """
        inv = self.inventory
        moved = []

        for entry in rename_entries:
            try:
                self._move_entry(entry)
            except:
                self._rollback_move(moved)
                raise
            moved.append(entry)

    def _rollback_move(self, moved):
        """Try to rollback a previous move in case of a filesystem error."""
        inv = self.inventory
        for entry in moved:
            try:
                self._move_entry(WorkingTree._RenameEntry(
                    entry.to_rel, entry.from_id,
                    entry.to_tail, entry.to_parent_id, entry.from_rel,
                    entry.from_tail, entry.from_parent_id,
                    entry.only_change_inv))
            except errors.BzrMoveFailedError, e:
                raise errors.BzrMoveFailedError( '', '', "Rollback failed."
                        " The working tree is in an inconsistent state."
                        " Please consider doing a 'bzr revert'."
                        " Error message is: %s" % e)
    def _move_entry(self, entry):
        inv = self.inventory
        from_rel_abs = self.abspath(entry.from_rel)
        to_rel_abs = self.abspath(entry.to_rel)
        if from_rel_abs == to_rel_abs:
            raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
                "Source and target are identical.")

        if not entry.only_change_inv:
            try:
                osutils.rename(from_rel_abs, to_rel_abs)
            except OSError, e:
                raise errors.BzrMoveFailedError(entry.from_rel,
                    entry.to_rel, e[1])
        inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
    @needs_tree_write_lock
    def rename_one(self, from_rel, to_rel, after=False):
        """This can change the directory or the filename or both.

        rename_one has several 'modes' to work. First, it can rename a physical
        file and change the file_id. That is the normal mode. Second, it can
        only change the file_id without touching any physical file. This is
        the new mode introduced in version 0.15.

        rename_one uses the second mode if 'after == True' and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the second mode if 'after == False' and 'from_rel' is
        versioned but no longer in the working tree, and 'to_rel' is not
        versioned but present in the working tree.

        rename_one uses the first mode if 'after == False' and 'from_rel' is
        versioned and present in the working tree, and 'to_rel' is not
        versioned and not present in the working tree.

        Everything else results in an error.
        """
        inv = self.inventory
        rename_entries = []

        # create rename entries and tuples
        from_tail = splitpath(from_rel)[-1]
        from_id = inv.path2id(from_rel)
        if from_id is None:
            # if file is missing in the inventory maybe it's in the basis_tree
            basis_tree = self.branch.basis_tree()
            from_id = basis_tree.path2id(from_rel)
            if from_id is None:
                raise errors.BzrRenameFailedError(from_rel,to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            # put entry back in the inventory so we can rename it
            from_entry = basis_tree.inventory[from_id].copy()
            inv.add(from_entry)
        else:
            from_entry = inv[from_id]
        from_parent_id = from_entry.parent_id
        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)
        rename_entry = WorkingTree._RenameEntry(from_rel=from_rel,
                                     from_id=from_id,
                                     from_tail=from_tail,
                                     from_parent_id=from_parent_id,
                                     to_rel=to_rel, to_tail=to_tail,
                                     to_parent_id=to_dir_id)
        rename_entries.append(rename_entry)

        # determine which move mode to use. checks also for movability
        rename_entries = self._determine_mv_mode(rename_entries, after)

        # check if the target changed directory and if the target directory is
        # versioned
        if to_dir_id is None:
            raise errors.BzrMoveFailedError(from_rel,to_rel,
                errors.NotVersionedError(path=str(to_dir)))

        # all checks done. now we can continue with our actual work
        mutter('rename_one:\n'
               '  from_id   {%s}\n'
               '  from_rel: %r\n'
               '  to_rel: %r\n'
               '  to_dir %r\n'
               '  to_dir_id {%s}\n',
               from_id, from_rel, to_rel, to_dir, to_dir_id)

        self._move(rename_entries)
        self._write_inventory(inv)
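    # Illustrative sketch, not part of bzrlib: rename_one() with after=True
    # only rewrites the inventory, which is useful when the file was already
    # renamed on disk by an external tool.
    #
    #   wt.rename_one('old_name.txt', 'new_name.txt', after=True)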
    class _RenameEntry(object):
        def __init__(self, from_rel, from_id, from_tail, from_parent_id,
                     to_rel, to_tail, to_parent_id, only_change_inv=False):
            self.from_rel = from_rel
            self.from_id = from_id
            self.from_tail = from_tail
            self.from_parent_id = from_parent_id
            self.to_rel = to_rel
            self.to_tail = to_tail
            self.to_parent_id = to_parent_id
            self.only_change_inv = only_change_inv
    def unknowns(self):
        """Return all unknown files.

        These are files in the working directory that are not versioned or
        control files or ignored.
        """
        # force the extras method to be fully executed before returning, to
        # prevent race conditions with the lock
        return iter(
            [subp for subp in self.extras() if not self.is_ignored(subp)])
    @needs_tree_write_lock
    def unversion(self, file_ids):
        """Remove the file ids in file_ids from the current versioned set.

        When a file_id is unversioned, all of its children are automatically
        unversioned.

        :param file_ids: The file ids to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        for file_id in file_ids:
            if file_id not in self._inventory:
                raise errors.NoSuchId(self, file_id)
        for file_id in file_ids:
            if self._inventory.has_id(file_id):
                self._inventory.remove_recursive_id(file_id)
        if len(file_ids):
            # in the future this should just set a dirty bit to wait for the
            # final unlock. However, until all methods of workingtree start
            # with the current in-memory inventory rather than triggering
            # a read, it is more complex - we need to teach read_inventory
            # to know when to read, and when to not read first... and possibly
            # to save first when the in memory one may be corrupted.
            # so for now, we just only write it if it is indeed dirty.
            self._write_inventory(self._inventory)
    def _iter_conflicts(self):
        conflicted = set()
        for info in self.list_files():
            path = info[0]
            stem = get_conflicted_stem(path)
            if stem is None:
                continue
            if stem not in conflicted:
                conflicted.add(stem)
                yield stem
def pull(self, source, overwrite=False, stop_revision=None,
1605
change_reporter=None, possible_transports=None, local=False):
1606
top_pb = ui.ui_factory.nested_progress_bar()
1609
pp = ProgressPhase("Pull phase", 2, top_pb)
1611
old_revision_info = self.branch.last_revision_info()
1612
basis_tree = self.basis_tree()
1613
count = self.branch.pull(source, overwrite, stop_revision,
1614
possible_transports=possible_transports,
1616
new_revision_info = self.branch.last_revision_info()
1617
if new_revision_info != old_revision_info:
1619
repository = self.branch.repository
1620
pb = ui.ui_factory.nested_progress_bar()
1621
basis_tree.lock_read()
1623
new_basis_tree = self.branch.basis_tree()
1630
change_reporter=change_reporter)
1631
if (basis_tree.inventory.root is None and
1632
new_basis_tree.inventory.root is not None):
1633
self.set_root_id(new_basis_tree.get_root_id())
1637
# TODO - dedup parents list with things merged by pull ?
1638
# reuse the revisiontree we merged against to set the new
1640
parent_trees = [(self.branch.last_revision(), new_basis_tree)]
1641
# we have to pull the merge trees out again, because
1642
# merge_inner has set the ids. - this corner is not yet
1643
# layered well enough to prevent double handling.
1644
# XXX TODO: Fix the double handling: telling the tree about
1645
# the already known parent data is wasteful.
1646
merges = self.get_parent_ids()[1:]
1647
parent_trees.extend([
1648
(parent, repository.revision_tree(parent)) for
1650
self.set_parent_trees(parent_trees)
1657
def put_file_bytes_non_atomic(self, file_id, bytes):
1658
"""See MutableTree.put_file_bytes_non_atomic."""
1659
stream = file(self.id2abspath(file_id), 'wb')
1664
# TODO: update the hashcache here ?
198
    def extras(self):
        """Yield all unversioned files in this WorkingTree.

        If there are any unversioned directories then only the directory is
        returned, not all its children. But if there are unversioned files
        under a versioned subdirectory, they are returned.

        Currently returned depth-first, sorted by name within directories.
        This is the same order used by 'osutils.walkdirs'.
        """
        ## TODO: Work from given directory downwards
        for path, dir_entry in self.inventory.directories():
            # mutter("search for unknowns in %r", path)
            dirabs = self.abspath(path)
            if not isdir(dirabs):
                # e.g. directory deleted
                continue
    def is_ignored(self, filename):
        """If the file is ignored, returns the pattern which caused it to
        be ignored, otherwise None. So this can simply be used as a
        boolean if desired."""
        if getattr(self, '_ignoreglobster', None) is None:
            self._ignoreglobster = globbing.Globster(self.get_ignore_list())
        return self._ignoreglobster.match(filename)
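    # Illustrative sketch, not part of bzrlib: is_ignored() returns the
    # matching ignore pattern (a truthy string) or None, so it doubles as a
    # boolean test.
    #
    #   pattern = wt.is_ignored('build/output.o')
    #   if pattern:
    #       print 'ignored by pattern', pattern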
    def kind(self, file_id):
        return file_kind(self.id2abspath(file_id))

    def stored_kind(self, file_id):
        """See Tree.stored_kind"""
        return self.inventory[file_id].kind

    def _comparison_data(self, entry, path):
        abspath = self.abspath(path)
        try:
            stat_value = os.lstat(abspath)
        except OSError, e:
            if getattr(e, 'errno', None) == errno.ENOENT:
                stat_value = None
                kind = None
                executable = False
            else:
                raise
        else:
            mode = stat_value.st_mode
            kind = osutils.file_kind_from_stat_mode(mode)
            if not supports_executable():
                executable = entry is not None and entry.executable
            else:
                executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
        return kind, executable, stat_value

    def _file_size(self, entry, stat_value):
        return stat_value.st_size
    def last_revision(self):
        """Return the last revision of the branch for this tree.

        This format tree does not support a separate marker for last-revision
        compared to the branch.

        See MutableTree.last_revision
        """
        return self._last_revision()

    def _last_revision(self):
        """helper for get_parent_ids."""
        return _mod_revision.ensure_null(self.branch.last_revision())

    def is_locked(self):
        return self._control_files.is_locked()

    def _must_be_locked(self):
        if not self.is_locked():
            raise errors.ObjectNotLocked(self)
    def lock_read(self):
        """See Branch.lock_read, and WorkingTree.unlock."""
        if not self.is_locked():
            self._reset_data()
        self.branch.lock_read()
        try:
            return self._control_files.lock_read()
        except:
            self.branch.unlock()
            raise

    def lock_tree_write(self):
        """See MutableTree.lock_tree_write, and WorkingTree.unlock."""
        if not self.is_locked():
            self._reset_data()
        self.branch.lock_read()
        try:
            return self._control_files.lock_write()
        except:
            self.branch.unlock()
            raise

    def lock_write(self):
        """See MutableTree.lock_write, and WorkingTree.unlock."""
        if not self.is_locked():
            self._reset_data()
        self.branch.lock_write()
        try:
            return self._control_files.lock_write()
        except:
            self.branch.unlock()
            raise
    def get_physical_lock_status(self):
        return self._control_files.get_physical_lock_status()

    def _basis_inventory_name(self):
        return 'basis-inventory-cache'

    def _reset_data(self):
        """Reset transient data that cannot be revalidated."""
        self._inventory_is_modified = False
        result = self._deserialize(self._transport.get('inventory'))
        self._set_inventory(result, dirty=False)
    @needs_tree_write_lock
    def set_last_revision(self, new_revision):
        """Change the last revision in the working tree."""
        if self._change_last_revision(new_revision):
            self._cache_basis_inventory(new_revision)

    def _change_last_revision(self, new_revision):
        """Template method part of set_last_revision to perform the change.

        This is used to allow WorkingTree3 instances to not affect branch
        when their last revision is set.
        """
        if _mod_revision.is_null(new_revision):
            self.branch.set_revision_history([])
            return False
        try:
            self.branch.generate_revision_history(new_revision)
        except errors.NoSuchRevision:
            # not present in the repo - dont try to set it deeper than the tip
            self.branch.set_revision_history([new_revision])
        return True
    def _write_basis_inventory(self, xml):
        """Write the basis inventory XML to the basis-inventory file"""
        path = self._basis_inventory_name()
        sio = StringIO(xml)
        self._transport.put_file(path, sio,
            mode=self.bzrdir._get_file_mode())

    def _create_basis_xml_from_inventory(self, revision_id, inventory):
        """Create the text that will be saved in basis-inventory"""
        inventory.revision_id = revision_id
        return xml7.serializer_v7.write_inventory_to_string(inventory)
    def _cache_basis_inventory(self, new_revision):
        """Cache new_revision as the basis inventory."""
        # TODO: this should allow the ready-to-use inventory to be passed in,
        # as commit already has that ready-to-use [while the format is the
        # same, anyway].
        try:
            # this double handles the inventory - unpack and repack -
            # but is easier to understand. We can/should put a conditional
            # in here based on whether the inventory is in the latest format
            # - perhaps we should repack all inventories on a repository
            # upgrade ?
            # the fast path is to copy the raw xml from the repository. If the
            # xml contains 'revision_id="', then we assume the right
            # revision_id is set. We must check for this full string, because a
            # root node id can legitimately look like 'revision_id' but cannot
            # contain a '"'.
            xml = self.branch.repository.get_inventory_xml(new_revision)
            firstline = xml.split('\n', 1)[0]
            if (not 'revision_id="' in firstline or
                'format="7"' not in firstline):
                inv = self.branch.repository._serializer.read_inventory_from_string(
                    xml, new_revision)
                xml = self._create_basis_xml_from_inventory(new_revision, inv)
            self._write_basis_inventory(xml)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):
            pass
    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._transport.get_bytes(path)

    def read_working_inventory(self):
        """Read the working inventory.

        :raises errors.InventoryModified: read_working_inventory will fail
            when the current in memory inventory has been modified.
        """
        # conceptually this should be an implementation detail of the tree.
        # XXX: Deprecate this.
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        if self._inventory_is_modified:
            raise errors.InventoryModified(self)
        result = self._deserialize(self._transport.get('inventory'))
        self._set_inventory(result, dirty=False)
        return result
    @needs_tree_write_lock
    def remove(self, files, verbose=False, to_file=None, keep_files=True,
        force=False):
        """Remove nominated files from the working inventory.

        :files: File paths relative to the basedir.
        :keep_files: If true, the files will also be kept.
        :force: Delete files and directories, even if they are changed and
            even if the directories are not empty.
        """
        if isinstance(files, basestring):
            files = [files]

        inv_delta = []

        new_files=set()
        unknown_nested_files=set()

        def recurse_directory_to_add_files(directory):
            # Recurse directory and add all files
            # so we can check if they have changed.
            for parent_info, file_infos in\
                self.walkdirs(directory):
                for relpath, basename, kind, lstat, fileid, kind in file_infos:
                    # Is it versioned or ignored?
                    if self.path2id(relpath) or self.is_ignored(relpath):
                        # Add nested content for deletion.
                        new_files.add(relpath)
                    else:
                        # Files which are not versioned and not ignored
                        # should be treated as unknown.
                        unknown_nested_files.add((relpath, None, kind))

        for filename in files:
            # Get file name into canonical form.
            abspath = self.abspath(filename)
            filename = self.relpath(abspath)
            if len(filename) > 0:
                new_files.add(filename)
                recurse_directory_to_add_files(filename)

        files = list(new_files)

        if len(files) == 0:
            return # nothing to do

        # Sort needed to first handle directory content before the directory
        files.sort(reverse=True)

        # Bail out if we are going to delete files we shouldn't
        if not keep_files and not force:
            has_changed_files = len(unknown_nested_files) > 0
            if not has_changed_files:
                for (file_id, path, content_change, versioned, parent_id, name,
                     kind, executable) in self.iter_changes(self.basis_tree(),
                         include_unchanged=True, require_versioned=False,
                         want_unversioned=True, specific_files=files):
                    if versioned == (False, False):
                        # The record is unknown ...
                        if not self.is_ignored(path[1]):
                            # ... but not ignored
                            has_changed_files = True
                            break
                    elif content_change and (kind[1] is not None):
                        # Versioned and changed, but not deleted
                        has_changed_files = True
                        break

            if has_changed_files:
                # Make delta show ALL applicable changes in error message.
                tree_delta = self.changes_from(self.basis_tree(),
                    require_versioned=False, want_unversioned=True,
                    specific_files=files)
                for unknown_file in unknown_nested_files:
                    if unknown_file not in tree_delta.unversioned:
                        tree_delta.unversioned.extend((unknown_file,))
                raise errors.BzrRemoveChangedFilesError(tree_delta)

        # Build inv_delta and delete files where applicable,
        # do this before any modifications to inventory.
        for f in files:
            fid = self.path2id(f)
            message = None
            if not fid:
                message = "%s is not versioned." % (f,)
            else:
                if verbose:
                    # having removed it, it must be either ignored or unknown
                    if self.is_ignored(f):
                        new_status = 'I'
                    else:
                        new_status = '?'
                    textui.show_status(new_status, self.kind(fid), f,
                                       to_file=to_file)
                inv_delta.append((f, None, fid, None))
                message = "removed %s" % (f,)

            if not keep_files:
                abs_path = self.abspath(f)
                if osutils.lexists(abs_path):
                    if (osutils.isdir(abs_path) and
                        len(os.listdir(abs_path)) > 0):
                        if force:
                            osutils.rmtree(abs_path)
                            message = "deleted %s" % (f,)
                        else:
                            message = "%s is not an empty directory "\
                                "and won't be deleted." % (f,)
                    else:
                        osutils.delete_any(abs_path)
                        message = "deleted %s" % (f,)
                elif message is not None:
                    # Only care if we haven't done anything yet.
                    message = "%s does not exist." % (f,)

            # Print only one message (if any) per file.
            if message is not None:
                note(message)
        self.apply_inventory_delta(inv_delta)
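    # Illustrative sketch, not part of bzrlib: unversioning files while keeping
    # them on disk, versus deleting them from the working tree as well.
    #
    #   wt.remove(['notes.txt'])                          # keep_files=True
    #   wt.remove(['notes.txt'], keep_files=False, force=True)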
    @needs_tree_write_lock
    def revert(self, filenames=None, old_tree=None, backups=True,
               pb=DummyProgress(), report_changes=False):
        from bzrlib.conflicts import resolve
        if filenames == []:
            filenames = None
            symbol_versioning.warn('Using [] to revert all files is deprecated'
                ' as of bzr 0.91. Please use None (the default) instead.',
                DeprecationWarning, stacklevel=2)
        if old_tree is None:
            basis_tree = self.basis_tree()
            basis_tree.lock_read()
            old_tree = basis_tree
        else:
            basis_tree = None
        try:
            conflicts = transform.revert(self, old_tree, filenames, backups, pb,
                                         report_changes)
            if filenames is None and len(self.get_parent_ids()) > 1:
                parent_trees = []
                last_revision = self.last_revision()
                if last_revision != _mod_revision.NULL_REVISION:
                    if basis_tree is None:
                        basis_tree = self.basis_tree()
                        basis_tree.lock_read()
                    parent_trees.append((last_revision, basis_tree))
                self.set_parent_trees(parent_trees)
            else:
                resolve(self, filenames, ignore_misses=True, recursive=True)
        finally:
            if basis_tree is not None:
                basis_tree.unlock()
        return conflicts
    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree can supply revision_trees for the basis revision only
        because there is only one cached inventory in the bzr directory.
        """
        if revision_id == self.last_revision():
            try:
                xml = self.read_basis_inventory()
            except errors.NoSuchFile:
                pass
            else:
                try:
                    inv = xml7.serializer_v7.read_inventory_from_string(xml)
                    # dont use the repository revision_tree api because we want
                    # to supply the inventory.
                    if inv.revision_id == revision_id:
                        return revisiontree.RevisionTree(self.branch.repository,
                            inv, revision_id)
                except errors.BadInventoryFormat:
                    pass
        # raise if there was no inventory, or if we read the wrong inventory.
        raise errors.NoSuchRevisionInTree(self, revision_id)
    # XXX: This method should be deprecated in favour of taking in a proper
    # new Inventory object.
    @needs_tree_write_lock
    def set_inventory(self, new_inventory_list):
        from bzrlib.inventory import (Inventory,
                                      InventoryDirectory,
                                      InventoryFile,
                                      InventoryLink)
        inv = Inventory(self.get_root_id())
        for path, file_id, parent, kind in new_inventory_list:
            name = os.path.basename(path)
            if name == "":
                continue
            # fixme, there should be a factory function inv,add_??
            if kind == 'directory':
                inv.add(InventoryDirectory(file_id, name, parent))
            elif kind == 'file':
                inv.add(InventoryFile(file_id, name, parent))
            elif kind == 'symlink':
                inv.add(InventoryLink(file_id, name, parent))
            else:
                raise errors.BzrError("unknown kind %r" % kind)
        self._write_inventory(inv)
    @needs_tree_write_lock
    def set_root_id(self, file_id):
        """Set the root id for this tree."""
        # for compatability
        if file_id is None:
            raise ValueError(
                'WorkingTree.set_root_id with fileid=None')
        file_id = osutils.safe_file_id(file_id)
        self._set_root_id(file_id)

    def _set_root_id(self, file_id):
        """Set the root id for this tree, in a format specific manner.

        :param file_id: The file id to assign to the root. It must not be
            present in the current inventory or an error will occur. It must
            not be None, but rather a valid file id.
        """
        inv = self._inventory
        orig_root_id = inv.root.file_id
        # TODO: it might be nice to exit early if there was nothing
        # to do, saving us from trigger a sync on unlock.
        self._inventory_is_modified = True
        # we preserve the root inventory entry object, but
        # unlink it from the byid index
        del inv._byid[inv.root.file_id]
        inv.root.file_id = file_id
        # and link it into the index with the new changed id.
        inv._byid[inv.root.file_id] = inv.root
        # and finally update all children to reference the new id.
        # XXX: this should be safe to just look at the root.children
        # list, not the WHOLE INVENTORY.
        for fid in inv:
            entry = inv[fid]
            if entry.parent_id == orig_root_id:
                entry.parent_id = inv.root.file_id
    def unlock(self):
        """See Branch.unlock.

        WorkingTree locking just uses the Branch locking facilities.
        This is currently the case because all working trees have an embedded
        branch within them. If, in the future, we were to make branch data
        shareable between multiple working trees, i.e. via shared storage,
        then we would probably want to lock both the local tree, and the
        branch.
        """
        raise NotImplementedError(self.unlock)
    def update(self, change_reporter=None, possible_transports=None):
        """Update a working tree along its branch.

        This will update the branch if it is bound too, which means we have
        multiple trees involved:

        - The new basis tree of the master.
        - The old basis tree of the branch.
        - The old basis tree of the working tree.
        - The current working tree state.

        Pathologically, all of these may be different, and non-ancestors of
        each other.  Conceptually we want to:

        - Preserve the wt.basis->wt.state changes
        - Transform the wt.basis to the new master basis.
        - Apply a merge of the old branch basis to get any 'local' changes from
          it into the tree.
        - Restore the wt.basis->wt.state changes.

        There isn't a single operation at the moment to do that, so we:
        - Merge current state -> basis tree of the master w.r.t. the old tree
          basis.
        - Do a 'normal' merge of the old branch basis if it is relevant.
        """
        if self.branch.get_bound_location() is not None:
            self.lock_write()
            update_branch = True
        else:
            self.lock_tree_write()
            update_branch = False
        try:
            if update_branch:
                old_tip = self.branch.update(possible_transports)
            else:
                old_tip = None
            return self._update_tree(old_tip, change_reporter)
        finally:
            self.unlock()
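    # Illustrative sketch (not part of the original source): for a checkout
    # bound to a master branch, a caller just invokes update() and the
    # locking shown above is handled internally, e.g.
    #
    #   wt = workingtree.WorkingTree.open('.')   # assumes a bound checkout
    #   conflict_count = wt.update()
    #
    # The return value is whatever _update_tree() below returns.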
    @needs_tree_write_lock
    def _update_tree(self, old_tip=None, change_reporter=None):
        """Update a tree to the master branch.

        :param old_tip: if supplied, the previous tip revision of the branch,
            before it was changed to the master branch's tip.
        """
        # here if old_tip is not None, it is the old tip of the branch before
        # it was updated from the master branch. This should become a pending
        # merge in the working tree to preserve the user's existing work.  We
        # can't set that until we update the working tree's last revision to be
        # one from the new branch, because it will just get absorbed by the
        # parent de-duplication logic.
        #
        # We MUST save it even if an error occurs, because otherwise the user's
        # local work is unreferenced and will appear to have been lost.
        #
        result = 0
        try:
            last_rev = self.get_parent_ids()[0]
        except IndexError:
            last_rev = _mod_revision.NULL_REVISION
        if last_rev != _mod_revision.ensure_null(self.branch.last_revision()):
            # merge tree state up to new branch tip.
            basis = self.basis_tree()
            basis.lock_read()
            try:
                to_tree = self.branch.basis_tree()
                if basis.inventory.root is None:
                    self.set_root_id(to_tree.get_root_id())
                    self.flush()
                result += merge.merge_inner(
                                      self.branch,
                                      to_tree,
                                      basis,
                                      this_tree=self,
                                      change_reporter=change_reporter)
            finally:
                basis.unlock()
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to, to set the basis:
            parent_trees = [(self.branch.last_revision(), to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in ram already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if (old_tip is not None and not _mod_revision.is_null(old_tip)):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
        else:
            # the working tree had the same last-revision as the master
            # branch did. We may still have to pivot local work from the local
            # branch into old_tip:
            if (old_tip is not None and not _mod_revision.is_null(old_tip)):
                self.add_parent_tree_id(old_tip)
        if (old_tip is not None and not _mod_revision.is_null(old_tip)
            and old_tip != last_rev):
            # our last revision was not the prior branch last revision
            # and we have converted that last revision to a pending merge.
            # base is somewhere between the branch tip now
            # and the now pending merge

            # Since we just modified the working tree and inventory, flush out
            # the current state, before we modify it again.
            # TODO: jam 20070214 WorkingTree3 doesn't require this, dirstate
            #       requires it only because TreeTransform directly munges the
            #       inventory and calls tree._write_inventory(). Ultimately we
            #       should be able to remove this extra flush.
            self.flush()
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                old_tip)
            base_tree = self.branch.repository.revision_tree(base_rev_id)
            other_tree = self.branch.repository.revision_tree(old_tip)
            result += merge.merge_inner(
                                      self.branch,
                                      other_tree,
                                      base_tree,
                                      this_tree=self,
                                      change_reporter=change_reporter)
        return result
    def _write_hashcache_if_dirty(self):
        """Write out the hashcache if it is dirty."""
        if self._hashcache.needs_write:
            try:
                self._hashcache.write()
            except OSError, e:
                if e.errno not in (errno.EPERM, errno.EACCES):
                    raise
                # TODO: jam 20061219 Should this be a warning? A single line
                #       warning might be sufficient to let the user know what
                #       is going on.
                mutter('Could not write hashcache for %s\nError: %s',
                       self._hashcache.cache_file_name(), e)

    @needs_tree_write_lock
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        self._set_inventory(inv, dirty=True)
        self.flush()
    def set_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.set_conflicts, self)

    def add_conflicts(self, arg):
        raise errors.UnsupportedOperation(self.add_conflicts, self)

    @needs_read_lock
    def conflicts(self):
        conflicts = _mod_conflicts.ConflictList()
        for conflicted in self._iter_conflicts():
            text = True
            try:
                if file_kind(self.abspath(conflicted)) != "file":
                    text = False
            except errors.NoSuchFile:
                text = False
            if text is True:
                for suffix in ('.THIS', '.OTHER'):
                    try:
                        kind = file_kind(self.abspath(conflicted+suffix))
                        if kind != "file":
                            text = False
                    except errors.NoSuchFile:
                        text = False
                    if text == False:
                        break
            ctype = {True: 'text conflict', False: 'contents conflict'}[text]
            conflicts.append(_mod_conflicts.Conflict.factory(ctype,
                             path=conflicted,
                             file_id=self.path2id(conflicted)))
        return conflicts
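    # Illustrative sketch (not part of the original source): callers normally
    # just iterate the returned ConflictList under a read lock, e.g.
    #
    #   for conflict in wt.conflicts():
    #       print conflict.typestring, conflict.path
    #
    # typestring and path are attributes of bzrlib Conflict objects; wt is an
    # open WorkingTree.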
    def walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        returns a generator which yields items in the form:
        ((current_directory_path, fileid),
         [(file1_path, file1_name, file1_kind, (lstat), file1_id,
           file1_kind), ... ])

        This API returns a generator, which is only valid during the current
        tree transaction - within a single lock_read or lock_write duration.

        If the tree is not locked, it may cause an error to be raised,
        depending on the tree implementation.
        """
2379
disk_top = self.abspath(prefix)
2380
if disk_top.endswith('/'):
2381
disk_top = disk_top[:-1]
2382
top_strip_len = len(disk_top) + 1
2383
inventory_iterator = self._walkdirs(prefix)
2384
disk_iterator = osutils.walkdirs(disk_top, prefix)
2386
current_disk = disk_iterator.next()
2387
disk_finished = False
2389
if not (e.errno == errno.ENOENT or
2390
(sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
2393
disk_finished = True
2395
current_inv = inventory_iterator.next()
2396
inv_finished = False
2397
except StopIteration:
2400
while not inv_finished or not disk_finished:
2402
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2403
cur_disk_dir_content) = current_disk
2405
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
2406
cur_disk_dir_content) = ((None, None), None)
2407
if not disk_finished:
2408
# strip out .bzr dirs
2409
if (cur_disk_dir_path_from_top[top_strip_len:] == '' and
2410
len(cur_disk_dir_content) > 0):
2411
# osutils.walkdirs can be made nicer -
2412
# yield the path-from-prefix rather than the pathjoined
2414
bzrdir_loc = bisect_left(cur_disk_dir_content,
2416
if (bzrdir_loc < len(cur_disk_dir_content)
2417
and self.bzrdir.is_control_filename(
2418
cur_disk_dir_content[bzrdir_loc][0])):
2419
# we don't yield the contents of, or, .bzr itself.
2420
del cur_disk_dir_content[bzrdir_loc]
2422
# everything is unknown
2425
# everything is missing
2428
direction = cmp(current_inv[0][0], cur_disk_dir_relpath)
2430
# disk is before inventory - unknown
2431
dirblock = [(relpath, basename, kind, stat, None, None) for
2432
relpath, basename, kind, stat, top_path in
2433
cur_disk_dir_content]
2434
yield (cur_disk_dir_relpath, None), dirblock
2436
current_disk = disk_iterator.next()
2437
except StopIteration:
2438
disk_finished = True
2440
# inventory is before disk - missing.
2441
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
2442
for relpath, basename, dkind, stat, fileid, kind in
2444
yield (current_inv[0][0], current_inv[0][1]), dirblock
2446
current_inv = inventory_iterator.next()
2447
except StopIteration:
2450
# versioned present directory
2451
# merge the inventory and disk data together
2453
for relpath, subiterator in itertools.groupby(sorted(
2454
current_inv[1] + cur_disk_dir_content,
2455
key=operator.itemgetter(0)), operator.itemgetter(1)):
2456
path_elements = list(subiterator)
2457
if len(path_elements) == 2:
2458
inv_row, disk_row = path_elements
2459
# versioned, present file
2460
dirblock.append((inv_row[0],
2461
inv_row[1], disk_row[2],
2462
disk_row[3], inv_row[4],
2464
elif len(path_elements[0]) == 5:
2466
dirblock.append((path_elements[0][0],
2467
path_elements[0][1], path_elements[0][2],
2468
path_elements[0][3], None, None))
2469
elif len(path_elements[0]) == 6:
2470
# versioned, absent file.
2471
dirblock.append((path_elements[0][0],
2472
path_elements[0][1], 'unknown', None,
2473
path_elements[0][4], path_elements[0][5]))
2475
raise NotImplementedError('unreachable code')
2476
yield current_inv[0], dirblock
2478
current_inv = inventory_iterator.next()
2479
except StopIteration:
2482
current_disk = disk_iterator.next()
2483
except StopIteration:
2484
disk_finished = True
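    # Illustrative sketch (not part of the original source): consuming the
    # generator under a lock, as the docstring above requires.
    #
    #   wt.lock_read()
    #   try:
    #       for (dir_relpath, dir_file_id), dirblock in wt.walkdirs():
    #           for relpath, name, kind, lstat, file_id, kind2 in dirblock:
    #               pass  # e.g. collect unknown files (file_id is None)
    #   finally:
    #       wt.unlock()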
    def _walkdirs(self, prefix=""):
        """Walk the directories of this tree.

        :param prefix: is used as the directory to start with.
        returns a generator which yields items in the form:
            ((current_directory_path, fileid),
             [(file1_path, file1_name, file1_kind, None, file1_id,
               file1_kind), ... ])
        """
        _directory = 'directory'
        # get the root in the inventory
        inv = self.inventory
        top_id = inv.path2id(prefix)
        if top_id is None:
            pending = []
        else:
            pending = [(prefix, '', _directory, None, top_id, None)]
        while pending:
            dirblock = []
            currentdir = pending.pop()
            # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
            top_id = currentdir[4]
            if currentdir[0]:
                relroot = currentdir[0] + '/'
            else:
                relroot = ""
            # FIXME: stash the node in pending
            entry = inv[top_id]
            if entry.kind == 'directory':
                for name, child in entry.sorted_children():
                    dirblock.append((relroot + name, name, child.kind, None,
                        child.file_id, child.kind
                        ))
            yield (currentdir[0], entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append(dir)
    @needs_tree_write_lock
    def auto_resolve(self):
        """Automatically resolve text conflicts according to contents.

        Only text conflicts are auto_resolvable. Files with no conflict markers
        are considered 'resolved', because bzr always puts conflict markers
        into files that have text conflicts.  The corresponding .THIS, .BASE
        and .OTHER files are deleted, as per 'resolve'.

        :return: a tuple of ConflictLists: (un_resolved, resolved).
        """
        un_resolved = _mod_conflicts.ConflictList()
        resolved = _mod_conflicts.ConflictList()
        conflict_re = re.compile('^(<{7}|={7}|>{7})')
        for conflict in self.conflicts():
            if (conflict.typestring != 'text conflict' or
                self.kind(conflict.file_id) != 'file'):
                un_resolved.append(conflict)
                continue
            my_file = open(self.id2abspath(conflict.file_id), 'rb')
            try:
                for line in my_file:
                    if conflict_re.search(line):
                        un_resolved.append(conflict)
                        break
                else:
                    resolved.append(conflict)
            finally:
                my_file.close()
        resolved.remove_files(self)
        self.set_conflicts(un_resolved)
        return un_resolved, resolved
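    # Illustrative sketch (not part of the original source): after a merge, a
    # caller can let the tree clear the trivial cases itself.
    #
    #   un_resolved, resolved = wt.auto_resolve()
    #   if un_resolved:
    #       pass  # files still containing <<<<<<< style conflict markers
    #
    # Only 'text conflict' entries whose files are free of conflict markers
    # are moved to the resolved list, as implemented above.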
    def _check(self, references):
        """Check the tree for consistency.

        :param references: A dict with keys matching the items returned by
            self._get_check_refs(), and values from looking those keys up in
            the repository.
        """
        tree_basis = self.basis_tree()
        tree_basis.lock_read()
        try:
            repo_basis = references[('trees', self.last_revision())]
            if len(list(repo_basis.iter_changes(tree_basis))) > 0:
                raise errors.BzrCheckError(
                    "Mismatched basis inventory content.")
            self._validate()
        finally:
            tree_basis.unlock()

    def _validate(self):
        """Validate internal structures.

        This is meant mostly for the test suite. To give it a chance to detect
        corruption after actions have occurred. The default implementation is a
        no-op.

        :return: None. An exception should be raised if there is an error.
        """
        return
    def _get_rules_searcher(self, default_searcher):
        """See Tree._get_rules_searcher."""
        if self._rules_searcher is None:
            self._rules_searcher = super(WorkingTree,
                self)._get_rules_searcher(default_searcher)
        return self._rules_searcher

    def get_shelf_manager(self):
        """Return the ShelfManager for this WorkingTree."""
        from bzrlib.shelf import ShelfManager
        return ShelfManager(self, self._transport)
class WorkingTree2(WorkingTree):
    """This is the Format 2 working tree.

    This was the first weave based working tree.
     - uses os locks for locking.
     - uses the branch last-revision.
    """

    def __init__(self, *args, **kwargs):
        super(WorkingTree2, self).__init__(*args, **kwargs)
        # WorkingTree2 has more of a constraint that self._inventory must
        # exist. Because this is an older format, we don't mind the overhead
        # caused by the extra computation here.
        #
        # Newer WorkingTrees should only have self._inventory set when they
        # have a read lock.
        if self._inventory is None:
            self.read_working_inventory()

    def _get_check_refs(self):
        """Return the references needed to perform a check of this tree."""
        return [('trees', self.last_revision())]

    def lock_tree_write(self):
        """See WorkingTree.lock_tree_write().

        In Format2 WorkingTrees we have a single lock for the branch and tree
        so lock_tree_write() degrades to lock_write().
        """
        self.branch.lock_write()
        try:
            return self._control_files.lock_write()
        except:
            self.branch.unlock()
            raise

    def unlock(self):
        # do non-implementation specific cleanup
        self._cleanup()

        # we share control files:
        if self._control_files._lock_count == 3:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()

        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()
class WorkingTree3(WorkingTree):
    """This is the Format 3 working tree.

    This differs from the base WorkingTree by:
     - having its own file lock
     - having its own last-revision property.

    This is new in bzr 0.8
    """

    @needs_read_lock
    def _last_revision(self):
        """See Mutable.last_revision."""
        try:
            return self._transport.get_bytes('last-revision')
        except errors.NoSuchFile:
            return _mod_revision.NULL_REVISION

    def _change_last_revision(self, revision_id):
        """See WorkingTree._change_last_revision."""
        if revision_id is None or revision_id == _mod_revision.NULL_REVISION:
            try:
                self._transport.delete('last-revision')
            except errors.NoSuchFile:
                pass
            return False
        else:
            self._transport.put_bytes('last-revision', revision_id,
                mode=self.bzrdir._get_file_mode())
            return True
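    # Illustrative note (not in the original source): the format 3 tree keeps
    # its tip in a plain 'last-revision' control file, so the two methods
    # above are a simple byte round trip:
    #
    #   wt._change_last_revision('some-revid')    # writes 'last-revision'
    #   wt._last_revision() == 'some-revid'       # reads it back
    #
    # 'some-revid' is a placeholder; NULL_REVISION deletes the file instead,
    # and _last_revision() maps the missing file back to NULL_REVISION.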
    def _get_check_refs(self):
        """Return the references needed to perform a check of this tree."""
        return [('trees', self.last_revision())]

    @needs_tree_write_lock
    def set_conflicts(self, conflicts):
        self._put_rio('conflicts', conflicts.to_stanzas(),
                      CONFLICT_HEADER_1)

    @needs_tree_write_lock
    def add_conflicts(self, new_conflicts):
        conflict_set = set(self.conflicts())
        conflict_set.update(set(list(new_conflicts)))
        self.set_conflicts(_mod_conflicts.ConflictList(sorted(conflict_set,
                           key=_mod_conflicts.Conflict.sort_key)))

    @needs_read_lock
    def conflicts(self):
        try:
            confile = self._transport.get('conflicts')
        except errors.NoSuchFile:
            return _mod_conflicts.ConflictList()
        try:
            try:
                if confile.next() != CONFLICT_HEADER_1 + '\n':
                    raise errors.ConflictFormatError()
            except StopIteration:
                raise errors.ConflictFormatError()
            return _mod_conflicts.ConflictList.from_stanzas(RioReader(confile))
        finally:
            confile.close()
    def unlock(self):
        # do non-implementation specific cleanup
        self._cleanup()
        if self._control_files._lock_count == 1:
            # _inventory_is_modified is always False during a read lock.
            if self._inventory_is_modified:
                self.flush()
            self._write_hashcache_if_dirty()
        # reverse order of locking.
        try:
            return self._control_files.unlock()
        finally:
            self.branch.unlock()


def get_conflicted_stem(path):
    for suffix in _mod_conflicts.CONFLICT_SUFFIXES:
        if path.endswith(suffix):
            return path[:-len(suffix)]
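# Illustrative note (not in the original source): this helper just strips a
# recognised conflict suffix and returns None when the path has none, e.g.
#
#   get_conflicted_stem('foo.txt.THIS')   # -> 'foo.txt'
#   get_conflicted_stem('foo.txt')        # -> None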
2739
class WorkingTreeFormat(object):
2740
"""An encapsulation of the initialization and open routines for a format.
2742
    Formats provide three things:
     * An initialization routine,
     * a format string,
     * an open routine.

    Formats are placed in a dict by their format string for reference
    during workingtree opening. It's not required that these be instances;
    they can be classes themselves with class methods - it simply depends on
    whether state is needed for a given format or not.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object will be created every time regardless.
    """
2757
_default_format = None
2758
"""The default format used for new trees."""
2761
"""The known formats."""
2763
requires_rich_root = False
2765
upgrade_recommended = False
2768
def find_format(klass, a_bzrdir):
2769
"""Return the format for the working tree object in a_bzrdir."""
2771
transport = a_bzrdir.get_workingtree_transport(None)
2772
format_string = transport.get("format").read()
2773
return klass._formats[format_string]
2774
except errors.NoSuchFile:
2775
raise errors.NoWorkingTree(base=transport.base)
2777
raise errors.UnknownFormatError(format=format_string,
2778
kind="working tree")
2780
def __eq__(self, other):
2781
return self.__class__ is other.__class__
2783
def __ne__(self, other):
2784
return not (self == other)
2787
def get_default_format(klass):
2788
"""Return the current default format."""
2789
return klass._default_format
2791
def get_format_string(self):
2792
"""Return the ASCII format string that identifies this format."""
2793
raise NotImplementedError(self.get_format_string)
2795
def get_format_description(self):
2796
"""Return the short description for this format."""
2797
raise NotImplementedError(self.get_format_description)
2799
def is_supported(self):
2800
"""Is this format supported?
2802
Supported formats can be initialized and opened.
2803
Unsupported formats may not support initialization or committing or
2804
some other features depending on the reason for not being supported.
2808
def supports_content_filtering(self):
2809
"""True if this format supports content filtering."""
2812
def supports_views(self):
2813
"""True if this format supports stored views."""
2817
def register_format(klass, format):
2818
klass._formats[format.get_format_string()] = format
2821
def set_default_format(klass, format):
2822
klass._default_format = format
2825
def unregister_format(klass, format):
2826
del klass._formats[format.get_format_string()]
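    # Illustrative sketch (not part of the original source): the registry is
    # keyed by each format's format string, so opening a tree reduces to a
    # dictionary lookup, roughly:
    #
    #   format = WorkingTreeFormat.find_format(a_bzrdir)
    #   wt = format.open(a_bzrdir)
    #
    # New formats join the table via WorkingTreeFormat.register_format(), as
    # done at the bottom of this module.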
2829
class WorkingTreeFormat2(WorkingTreeFormat):
2830
"""The second working tree format.
2832
This format modified the hash cache from the format 1 hash cache.
2835
upgrade_recommended = True
2837
def get_format_description(self):
2838
"""See WorkingTreeFormat.get_format_description()."""
2839
return "Working tree format 2"
2841
def _stub_initialize_on_transport(self, transport, file_mode):
2842
"""Workaround: create control files for a remote working tree.
2844
This ensures that it can later be updated and dealt with locally,
2845
since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
2846
no working tree. (See bug #43064).
2849
inv = inventory.Inventory()
2850
xml5.serializer_v5.write_inventory(inv, sio, working=True)
2852
transport.put_file('inventory', sio, file_mode)
2853
transport.put_bytes('pending-merges', '', file_mode)
2855
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
2856
accelerator_tree=None, hardlink=False):
2857
"""See WorkingTreeFormat.initialize()."""
2858
if not isinstance(a_bzrdir.transport, LocalTransport):
2859
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2860
if from_branch is not None:
2861
branch = from_branch
2863
branch = a_bzrdir.open_branch()
2864
if revision_id is None:
2865
revision_id = _mod_revision.ensure_null(branch.last_revision())
2868
branch.generate_revision_history(revision_id)
2871
inv = inventory.Inventory()
2872
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
2878
basis_tree = branch.repository.revision_tree(revision_id)
2879
if basis_tree.inventory.root is not None:
2880
wt.set_root_id(basis_tree.get_root_id())
2881
# set the parent list and cache the basis tree.
2882
if _mod_revision.is_null(revision_id):
2885
parent_trees = [(revision_id, basis_tree)]
2886
wt.set_parent_trees(parent_trees)
2887
transform.build_tree(basis_tree, wt)
2891
super(WorkingTreeFormat2, self).__init__()
2892
self._matchingbzrdir = bzrdir.BzrDirFormat6()
2894
def open(self, a_bzrdir, _found=False):
2895
"""Return the WorkingTree object for a_bzrdir
2897
_found is a private parameter, do not use it. It is used to indicate
2898
if format probing has already been done.
2901
# we are being called directly and must probe.
2902
raise NotImplementedError
2903
if not isinstance(a_bzrdir.transport, LocalTransport):
2904
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2905
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
2911
class WorkingTreeFormat3(WorkingTreeFormat):
2912
"""The second working tree format updated to record a format marker.
2915
- exists within a metadir controlling .bzr
2916
- includes an explicit version marker for the workingtree control
2917
files, separate from the BzrDir format
2918
- modifies the hash cache format
2920
- uses a LockDir to guard access for writes.
2923
upgrade_recommended = True
2925
def get_format_string(self):
2926
"""See WorkingTreeFormat.get_format_string()."""
2927
return "Bazaar-NG Working Tree format 3"
2929
def get_format_description(self):
2930
"""See WorkingTreeFormat.get_format_description()."""
2931
return "Working tree format 3"
2933
_lock_file_name = 'lock'
2934
_lock_class = LockDir
2936
_tree_class = WorkingTree3
2938
def __get_matchingbzrdir(self):
2939
return bzrdir.BzrDirMetaFormat1()
2941
_matchingbzrdir = property(__get_matchingbzrdir)
2943
def _open_control_files(self, a_bzrdir):
2944
transport = a_bzrdir.get_workingtree_transport(None)
2945
return LockableFiles(transport, self._lock_file_name,
2948
def initialize(self, a_bzrdir, revision_id=None, from_branch=None,
2949
accelerator_tree=None, hardlink=False):
2950
"""See WorkingTreeFormat.initialize().
2952
:param revision_id: if supplied, create a working tree at a different
2953
revision than the branch is at.
2954
:param accelerator_tree: A tree which can be used for retrieving file
2955
contents more quickly than the revision tree, i.e. a workingtree.
2956
The revision tree will be used for cases where accelerator_tree's
2957
content is different.
2958
:param hardlink: If true, hard-link files from accelerator_tree,
2961
if not isinstance(a_bzrdir.transport, LocalTransport):
2962
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2963
transport = a_bzrdir.get_workingtree_transport(self)
2964
control_files = self._open_control_files(a_bzrdir)
2965
control_files.create_lock()
2966
control_files.lock_write()
2967
transport.put_bytes('format', self.get_format_string(),
2968
mode=a_bzrdir._get_file_mode())
2969
if from_branch is not None:
2970
branch = from_branch
2972
branch = a_bzrdir.open_branch()
2973
if revision_id is None:
2974
revision_id = _mod_revision.ensure_null(branch.last_revision())
2975
# WorkingTree3 can handle an inventory which has a unique root id.
2976
# as of bzr 0.12. However, bzr 0.11 and earlier fail to handle
2977
# those trees. And because there isn't a format bump in between, we
2978
# are maintaining compatibility with older clients.
2979
# inv = Inventory(root_id=gen_root_id())
2980
inv = self._initial_inventory()
2981
wt = self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
2987
_control_files=control_files)
2988
wt.lock_tree_write()
2990
basis_tree = branch.repository.revision_tree(revision_id)
2991
# only set an explicit root id if there is one to set.
2992
if basis_tree.inventory.root is not None:
2993
wt.set_root_id(basis_tree.get_root_id())
2994
if revision_id == _mod_revision.NULL_REVISION:
2995
wt.set_parent_trees([])
else:
2997
wt.set_parent_trees([(revision_id, basis_tree)])
2998
transform.build_tree(basis_tree, wt)
3000
# Unlock in this order so that the unlock-triggers-flush in
3001
# WorkingTree is given a chance to fire.
3002
control_files.unlock()
3006
def _initial_inventory(self):
3007
return inventory.Inventory()
3010
super(WorkingTreeFormat3, self).__init__()
3012
def open(self, a_bzrdir, _found=False):
3013
"""Return the WorkingTree object for a_bzrdir
3015
_found is a private parameter, do not use it. It is used to indicate
3016
if format probing has already been done.
3019
# we are being called directly and must probe.
3020
raise NotImplementedError
3021
if not isinstance(a_bzrdir.transport, LocalTransport):
3022
raise errors.NotLocalUrl(a_bzrdir.transport.base)
3023
wt = self._open(a_bzrdir, self._open_control_files(a_bzrdir))
3026
def _open(self, a_bzrdir, control_files):
3027
"""Open the tree itself.
3029
:param a_bzrdir: the dir for the tree.
3030
:param control_files: the control files for the tree.
3032
return self._tree_class(a_bzrdir.root_transport.local_abspath('.'),
3036
_control_files=control_files)
3039
return self.get_format_string()
3042
__default_format = WorkingTreeFormat6()
WorkingTreeFormat.register_format(__default_format)
WorkingTreeFormat.register_format(WorkingTreeFormat5())
WorkingTreeFormat.register_format(WorkingTreeFormat4())
WorkingTreeFormat.register_format(WorkingTreeFormat3())
WorkingTreeFormat.set_default_format(__default_format)
# formats which have no format string are not discoverable
# and not independently creatable, so are not registered.
_legacy_formats = [WorkingTreeFormat2(),