# Copyright (C) 2005, 2006 Canonical Ltd

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""WorkingTree object and friends.

A WorkingTree represents the editable working copy of a branch.
Operations which modify the WorkingTree, such as renaming or adding
files, are also done here.  The WorkingTree has an inventory which is
updated by these operations.  A commit produces a new revision based on
the working tree and its inventory.

At the moment every WorkingTree has its own branch.  Remote
WorkingTrees aren't supported.

To get a WorkingTree, call bzrdir.open_workingtree() or
WorkingTree.open(dir).
"""

MERGE_MODIFIED_HEADER_1 = "BZR merge-modified list format 1"
CONFLICT_HEADER_1 = "BZR conflict list format 1"
# TODO: Give the workingtree sole responsibility for the working inventory;
# remove the variable and references to it from the branch. This may require
# updating the commit code so as to update the inventory within the working
# copy, and making sure there's only one WorkingTree for any directory on disk.
# At the moment they may alias the inventory and have old copies of it in
# memory.  (Now done? -- mbp 20060309)

from binascii import hexlify
from copy import deepcopy
from cStringIO import StringIO
55
from bzrlib import bzrdir, errors, ignores, osutils, urlutils
56
from bzrlib.atomicfile import AtomicFile
58
from bzrlib.conflicts import Conflict, ConflictList, CONFLICT_SUFFIXES
59
from bzrlib.decorators import needs_read_lock, needs_write_lock
60
from bzrlib.errors import (BzrCheckError,
63
WeaveRevisionNotPresent,
67
MergeModifiedFormatError,
70
from bzrlib.inventory import InventoryEntry, Inventory
71
from bzrlib.lockable_files import LockableFiles, TransportLock
72
from bzrlib.lockdir import LockDir
73
from bzrlib.merge import merge_inner, transform_tree
74
import bzrlib.mutabletree
75
from bzrlib.osutils import (
92
from bzrlib.progress import DummyProgress, ProgressPhase
93
from bzrlib.revision import NULL_REVISION
94
import bzrlib.revisiontree
95
from bzrlib.rio import RioReader, rio_file, Stanza
96
from bzrlib.symbol_versioning import (deprecated_passed,
103
from bzrlib.trace import mutter, note
104
from bzrlib.transform import build_tree
105
from bzrlib.transport import get_transport
106
from bzrlib.transport.local import LocalTransport
107
from bzrlib.textui import show_status
# the regex removes any weird characters; we don't escape them
# but rather just pull them out
_gen_file_id_re = re.compile(r'[^\w.]')
_gen_id_suffix = None
_gen_id_serial = 0


def _next_id_suffix():
    """Create a new file id suffix that is reasonably unique.

    On the first call we combine the current time with 64 bits of randomness
    to give a number that is very probably globally unique. Then each call in
    the same process adds 1 to a serial number we append to that unique value.
    """
    # XXX TODO: change bzrlib.add.smart_add to call workingtree.add() rather
    # than having to move the id randomness out of the inner loop like this.
    # XXX TODO: for the global randomness this uses we should add the thread-id
    # before the serial #.
    global _gen_id_suffix, _gen_id_serial
    if _gen_id_suffix is None:
        _gen_id_suffix = "-%s-%s-" % (compact_date(time()), rand_chars(16))
    _gen_id_serial += 1
    return _gen_id_suffix + str(_gen_id_serial)

def gen_file_id(name):
    """Return a new file id for the basename 'name'.

    The uniqueness is supplied from _next_id_suffix.
    """
    # The real randomness is in the _next_id_suffix, the
    # rest of the identifier is just to be nice.
    # So we:
    # 1) Remove anything that is not a word character or a dot, to keep the
    #    ids portable.
    # 2) squash to lowercase, so the file id doesn't have to
    #    be escaped (case-insensitive filesystems would bork for ids
    #    that only differed in case without escaping).
    # 3) truncate the filename to 20 chars. Long filenames also bork on some
    #    filesystems.
    # 4) Remove leading '.' characters to prevent the file ids from
    #    being considered hidden.
    ascii_word_only = _gen_file_id_re.sub('', name.lower())
    short_no_dots = ascii_word_only.lstrip('.')[:20]
    return short_no_dots + _next_id_suffix()


def gen_root_id():
    """Return a new tree-root file id."""
    return gen_file_id('TREE_ROOT')
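
# Illustrative sketch, not part of the original module: gen_file_id()
# lowercases the basename, strips everything but word characters and dots,
# drops leading dots, truncates to 20 characters and appends the unique
# suffix from _next_id_suffix().  The suffix shown in the comment is
# hypothetical.
def _demo_gen_file_id():
    # 'Foo Bar.TXT' -> 'foobar.txt' + '-<compact date>-<16 random chars>-1'
    return gen_file_id('Foo Bar.TXT')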

def needs_tree_write_lock(unbound):
    """Decorate unbound to take out and release a tree_write lock."""
    def tree_write_locked(self, *args, **kwargs):
        self.lock_tree_write()
        try:
            return unbound(self, *args, **kwargs)
        finally:
            self.unlock()
    tree_write_locked.__doc__ = unbound.__doc__
    tree_write_locked.__name__ = unbound.__name__
    return tree_write_locked
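
# Illustrative sketch, not part of the original module: the decorator is
# meant for WorkingTree methods, so the class and method below are purely
# hypothetical.
#
#     class SomeTree(WorkingTree):
#
#         @needs_tree_write_lock
#         def frob(self):
#             """Runs with lock_tree_write() held; unlock() runs on exit."""
#             ...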
176
class TreeEntry(object):
177
"""An entry that implements the minimum interface used by commands.
179
This needs further inspection, it may be better to have
180
InventoryEntries without ids - though that seems wrong. For now,
181
this is a parallel hierarchy to InventoryEntry, and needs to become
182
one of several things: decorates to that hierarchy, children of, or
184
Another note is that these objects are currently only used when there is
185
no InventoryEntry available - i.e. for unversioned objects.
186
Perhaps they should be UnversionedEntry et al. ? - RBC 20051003
189
def __eq__(self, other):
        # yes, this is ugly, TODO: best practice __eq__ style.
191
return (isinstance(other, TreeEntry)
192
and other.__class__ == self.__class__)
194
def kind_character(self):
198
class TreeDirectory(TreeEntry):
199
"""See TreeEntry. This is a directory in a working tree."""
201
def __eq__(self, other):
202
return (isinstance(other, TreeDirectory)
203
and other.__class__ == self.__class__)
205
def kind_character(self):
209
class TreeFile(TreeEntry):
210
"""See TreeEntry. This is a regular file in a working tree."""
212
def __eq__(self, other):
213
return (isinstance(other, TreeFile)
214
and other.__class__ == self.__class__)
216
def kind_character(self):
220
class TreeLink(TreeEntry):
221
"""See TreeEntry. This is a symlink in a working tree."""
223
def __eq__(self, other):
224
return (isinstance(other, TreeLink)
225
and other.__class__ == self.__class__)
227
def kind_character(self):
231
class WorkingTree(bzrlib.mutabletree.MutableTree):
232
"""Working copy tree.
234
The inventory is held in the `Branch` working-inventory, and the
235
files are in a directory on disk.
237
It is possible for a `WorkingTree` to have a filename which is
238
not listed in the Inventory and vice versa.
241
def __init__(self, basedir='.',
242
branch=DEPRECATED_PARAMETER,
248
"""Construct a WorkingTree for basedir.
250
If the branch is not supplied, it is opened automatically.
251
If the branch is supplied, it must be the branch for this basedir.
252
(branch.base is not cross checked, because for remote branches that
253
would be meaningless).
255
self._format = _format
256
self.bzrdir = _bzrdir
258
# not created via open etc.
259
warnings.warn("WorkingTree() is deprecated as of bzr version 0.8. "
260
"Please use bzrdir.open_workingtree or WorkingTree.open().",
263
wt = WorkingTree.open(basedir)
264
self._branch = wt.branch
265
self.basedir = wt.basedir
266
self._control_files = wt._control_files
267
self._hashcache = wt._hashcache
268
self._set_inventory(wt._inventory)
269
self._format = wt._format
270
self.bzrdir = wt.bzrdir
271
from bzrlib.hashcache import HashCache
272
from bzrlib.trace import note, mutter
273
assert isinstance(basedir, basestring), \
274
"base directory %r is not a string" % basedir
275
basedir = safe_unicode(basedir)
276
mutter("opening working tree %r", basedir)
277
if deprecated_passed(branch):
279
warnings.warn("WorkingTree(..., branch=XXX) is deprecated as of bzr 0.8."
280
" Please use bzrdir.open_workingtree() or"
281
" WorkingTree.open().",
285
self._branch = branch
287
self._branch = self.bzrdir.open_branch()
288
self.basedir = realpath(basedir)
289
# if branch is at our basedir and is a format 6 or less
290
if isinstance(self._format, WorkingTreeFormat2):
291
# share control object
292
self._control_files = self.branch.control_files
294
# assume all other formats have their own control files.
295
assert isinstance(_control_files, LockableFiles), \
296
"_control_files must be a LockableFiles, not %r" \
298
self._control_files = _control_files
299
# update the whole cache up front and write to disk if anything changed;
300
# in the future we might want to do this more selectively
301
# two possible ways offer themselves : in self._unlock, write the cache
302
# if needed, or, when the cache sees a change, append it to the hash
303
# cache file, and have the parser take the most recent entry for a
305
cache_filename = self.bzrdir.get_workingtree_transport(None).local_abspath('stat-cache')
306
hc = self._hashcache = HashCache(basedir, cache_filename, self._control_files._file_mode)
308
# is this scan needed ? it makes things kinda slow.
315
if _inventory is None:
316
self._set_inventory(self.read_working_inventory())
318
self._set_inventory(_inventory)
321
fget=lambda self: self._branch,
322
doc="""The branch this WorkingTree is connected to.
324
This cannot be set - it is reflective of the actual disk structure
325
the working tree has been constructed from.
328
def break_lock(self):
329
"""Break a lock if one is present from another instance.
331
Uses the ui factory to ask for confirmation if the lock may be from
334
This will probe the repository for its lock as well.
336
self._control_files.break_lock()
337
self.branch.break_lock()
339
def _set_inventory(self, inv):
340
assert inv.root is not None
341
self._inventory = inv
344
def open(path=None, _unsupported=False):
345
"""Open an existing working tree at path.
            path = os.getcwdu()
350
control = bzrdir.BzrDir.open(path, _unsupported)
351
return control.open_workingtree(_unsupported)
354
def open_containing(path=None):
355
"""Open an existing working tree which has its root about path.
357
This probes for a working tree at path and searches upwards from there.
359
Basically we keep looking up until we find the control directory or
360
run into /. If there isn't one, raises NotBranchError.
361
TODO: give this a new exception.
362
If there is one, it is returned, along with the unused portion of path.
364
:return: The WorkingTree that contains 'path', and the rest of path
367
path = osutils.getcwd()
368
control, relpath = bzrdir.BzrDir.open_containing(path)
370
return control.open_workingtree(), relpath
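
    # Illustrative sketch, not part of the original module: a command that is
    # handed an arbitrary path can recover both the containing tree and the
    # tree-relative remainder.  The path below is hypothetical.
    #
    #     tree, relpath = WorkingTree.open_containing('/home/user/project/src/foo.c')
    #     # tree.basedir == '/home/user/project', relpath == 'src/foo.c'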
373
def open_downlevel(path=None):
374
"""Open an unsupported working tree.
376
Only intended for advanced situations like upgrading part of a bzrdir.
378
return WorkingTree.open(path, _unsupported=True)
381
"""Iterate through file_ids for this tree.
383
file_ids are in a WorkingTree if they are in the working inventory
384
and the working file exists.
386
inv = self._inventory
387
for path, ie in inv.iter_entries():
388
if osutils.lexists(self.abspath(path)):
392
return "<%s of %s>" % (self.__class__.__name__,
393
getattr(self, 'basedir', None))
395
def abspath(self, filename):
396
return pathjoin(self.basedir, filename)
398
def basis_tree(self):
399
"""Return RevisionTree for the current last revision.
        If the leftmost parent is a ghost then the returned tree will be an
402
empty tree - one obtained by calling repository.revision_tree(None).
405
revision_id = self.get_parent_ids()[0]
407
# no parents, return an empty revision tree.
408
# in the future this should return the tree for
409
# 'empty:' - the implicit root empty tree.
410
return self.branch.repository.revision_tree(None)
413
xml = self.read_basis_inventory()
414
inv = bzrlib.xml6.serializer_v6.read_inventory_from_string(xml)
415
if inv is not None and inv.revision_id == revision_id:
416
return bzrlib.tree.RevisionTree(self.branch.repository,
418
except (NoSuchFile, errors.BadInventoryFormat):
420
# No cached copy available, retrieve from the repository.
421
# FIXME? RBC 20060403 should we cache the inventory locally
424
return self.branch.repository.revision_tree(revision_id)
425
except errors.RevisionNotPresent:
426
# the basis tree *may* be a ghost or a low level error may have
427
# occured. If the revision is present, its a problem, if its not
429
if self.branch.repository.has_revision(revision_id):
431
# the basis tree is a ghost so return an empty tree.
432
return self.branch.repository.revision_tree(None)
435
@deprecated_method(zero_eight)
436
def create(branch, directory):
437
"""Create a workingtree for branch at directory.
        If the directory already exists, it must have a .bzr directory.
        If it does not exist, it will be created.
442
This returns a new WorkingTree object for the new checkout.
444
TODO FIXME RBC 20060124 when we have checkout formats in place this
445
should accept an optional revisionid to checkout [and reject this if
446
checking out into the same dir as a pre-checkout-aware branch format.]
448
XXX: When BzrDir is present, these should be created through that
451
warnings.warn('delete WorkingTree.create', stacklevel=3)
452
transport = get_transport(directory)
453
if branch.bzrdir.root_transport.base == transport.base:
455
return branch.bzrdir.create_workingtree()
456
# different directory,
457
# create a branch reference
458
# and now a working tree.
459
raise NotImplementedError
462
@deprecated_method(zero_eight)
463
def create_standalone(directory):
464
"""Create a checkout and a branch and a repo at directory.
466
Directory must exist and be empty.
468
please use BzrDir.create_standalone_workingtree
470
return bzrdir.BzrDir.create_standalone_workingtree(directory)
472
def relpath(self, path):
473
"""Return the local path portion from a given path.
        The path may be absolute or relative. If it's a relative path it is
        interpreted relative to the Python current working directory.
478
return relpath(self.basedir, path)
480
def has_filename(self, filename):
481
return osutils.lexists(self.abspath(filename))
483
def get_file(self, file_id):
484
return self.get_file_byname(self.id2path(file_id))
486
def get_file_text(self, file_id):
487
return self.get_file(file_id).read()
489
def get_file_byname(self, filename):
490
return file(self.abspath(filename), 'rb')
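
    # Illustrative sketch, not part of the original module: the file access
    # helpers build on each other - path2id() gives the file id, get_file()
    # an open file object and get_file_text() the full contents.  'tree' and
    # 'README' are hypothetical.
    #
    #     file_id = tree.path2id('README')
    #     text = tree.get_file_text(file_id)   # whole contents as a byte string
    #     f = tree.get_file(file_id)           # file object opened 'rb'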
492
def get_parent_ids(self):
493
"""See Tree.get_parent_ids.
495
This implementation reads the pending merges list and last_revision
496
value and uses that to decide what the parents list should be.
498
last_rev = self._last_revision()
504
merges_file = self._control_files.get_utf8('pending-merges')
508
for l in merges_file.readlines():
509
parents.append(l.rstrip('\n'))
512
def get_root_id(self):
513
"""Return the id of this trees root"""
514
inv = self.read_working_inventory()
515
return inv.root.file_id
517
def _get_store_filename(self, file_id):
518
## XXX: badly named; this is not in the store at all
519
return self.abspath(self.id2path(file_id))
522
def clone(self, to_bzrdir, revision_id=None, basis=None):
        """Duplicate this working tree into to_bzrdir, including all state.
525
Specifically modified files are kept as modified, but
526
ignored and unknown files are discarded.
528
If you want to make a new line of development, see bzrdir.sprout()
            If not None, the cloned tree will have its last revision set to
            revision, and any difference between the source tree's last revision
            and this one merged in.
536
If not None, a closer copy of a tree which may have some files in
537
common, and which file content should be preferentially copied from.
539
# assumes the target bzr dir format is compatible.
540
result = self._format.initialize(to_bzrdir)
541
self.copy_content_into(result, revision_id)
545
def copy_content_into(self, tree, revision_id=None):
546
"""Copy the current content and user files of this tree into tree."""
547
if revision_id is None:
548
transform_tree(tree, self)
550
# TODO now merge from tree.last_revision to revision (to preserve
551
# user local changes)
552
transform_tree(tree, self)
553
tree.set_parent_ids([revision_id])
555
def id2abspath(self, file_id):
556
return self.abspath(self.id2path(file_id))
558
def has_id(self, file_id):
559
# files that have been deleted are excluded
560
inv = self._inventory
561
if not inv.has_id(file_id):
563
path = inv.id2path(file_id)
564
return osutils.lexists(self.abspath(path))
566
def has_or_had_id(self, file_id):
567
if file_id == self.inventory.root.file_id:
569
return self.inventory.has_id(file_id)
571
__contains__ = has_id
573
def get_file_size(self, file_id):
574
return os.path.getsize(self.id2abspath(file_id))

    def get_file_sha1(self, file_id, path=None):
        if path is None:
            path = self._inventory.id2path(file_id)
        return self._hashcache.get_sha1(path)

    def get_file_mtime(self, file_id, path=None):
        if path is None:
            path = self._inventory.id2path(file_id)
        return os.lstat(self.abspath(path)).st_mtime

    if not supports_executable():
        def is_executable(self, file_id, path=None):
            return self._inventory[file_id].executable
    else:
        def is_executable(self, file_id, path=None):
            if path is None:
                path = self._inventory.id2path(file_id)
            mode = os.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
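
    # Illustrative sketch, not part of the original module: on platforms
    # without an executable bit (supports_executable() is False) the answer
    # comes from the inventory; elsewhere the working file is stat()ed.
    # Callers are oblivious to the difference:
    #
    #     if tree.is_executable(file_id):
    #         ...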
598
def _add(self, files, ids, kinds):
599
"""See MutableTree._add."""
600
# TODO: Re-adding a file that is removed in the working copy
601
# should probably put it back with the previous ID.
602
# the read and write working inventory should not occur in this
603
# function - they should be part of lock_write and unlock.
604
inv = self.read_working_inventory()
605
for f, file_id, kind in zip(files, ids, kinds):
606
assert kind is not None
608
inv.add_path(f, kind=kind)
610
inv.add_path(f, kind=kind, file_id=file_id)
611
self._write_inventory(inv)
613
@needs_tree_write_lock
614
def _gather_kinds(self, files, kinds):
615
"""See MutableTree._gather_kinds."""
616
for pos, f in enumerate(files):
617
if kinds[pos] is None:
618
fullpath = normpath(self.abspath(f))
620
kinds[pos] = file_kind(fullpath)
622
if e.errno == errno.ENOENT:
623
raise NoSuchFile(fullpath)
626
def add_parent_tree_id(self, revision_id, allow_leftmost_as_ghost=False):
627
"""Add revision_id as a parent.
629
This is equivalent to retrieving the current list of parent ids
630
and setting the list to its value plus revision_id.
632
:param revision_id: The revision id to add to the parent list. It may
            be a ghost revision as long as it's not the first parent to be added,
            or the allow_leftmost_as_ghost parameter is set True.
635
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
637
parents = self.get_parent_ids() + [revision_id]
638
self.set_parent_ids(parents,
639
allow_leftmost_as_ghost=len(parents) > 1 or allow_leftmost_as_ghost)
641
@needs_tree_write_lock
642
def add_parent_tree(self, parent_tuple, allow_leftmost_as_ghost=False):
643
"""Add revision_id, tree tuple as a parent.
645
This is equivalent to retrieving the current list of parent trees
646
and setting the list to its value plus parent_tuple. See also
647
add_parent_tree_id - if you only have a parent id available it will be
648
simpler to use that api. If you have the parent already available, using
649
this api is preferred.
651
:param parent_tuple: The (revision id, tree) to add to the parent list.
652
If the revision_id is a ghost, pass None for the tree.
653
:param allow_leftmost_as_ghost: Allow the first parent to be a ghost.
655
parent_ids = self.get_parent_ids() + [parent_tuple[0]]
656
if len(parent_ids) > 1:
657
# the leftmost may have already been a ghost, preserve that if it
659
allow_leftmost_as_ghost = True
660
self.set_parent_ids(parent_ids,
661
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
663
@needs_tree_write_lock
664
def add_pending_merge(self, *revision_ids):
665
# TODO: Perhaps should check at this point that the
666
# history of the revision is actually present?
667
parents = self.get_parent_ids()
669
for rev_id in revision_ids:
670
if rev_id in parents:
672
parents.append(rev_id)
675
self.set_parent_ids(parents, allow_leftmost_as_ghost=True)
677
@deprecated_method(zero_eleven)
679
def pending_merges(self):
680
"""Return a list of pending merges.
682
These are revisions that have been merged into the working
683
directory but not yet committed.
685
As of 0.11 this is deprecated. Please see WorkingTree.get_parent_ids()
686
instead - which is available on all tree objects.
688
return self.get_parent_ids()[1:]
690
@needs_tree_write_lock
691
def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
692
"""Set the parent ids to revision_ids.
        See also set_parent_trees. This API will try to retrieve the tree data
        for each element of revision_ids from the tree's repository. If you have
        the tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is, however,
        an easier API to use.
700
:param revision_ids: The revision_ids to set as the parent ids of this
701
working tree. Any of these may be ghosts.
703
if len(revision_ids) > 0:
704
leftmost_id = revision_ids[0]
705
if (not allow_leftmost_as_ghost and not
706
self.branch.repository.has_revision(leftmost_id)):
707
raise errors.GhostRevisionUnusableHere(leftmost_id)
708
self.set_last_revision(leftmost_id)
710
self.set_last_revision(None)
711
merges = revision_ids[1:]
712
self._control_files.put_utf8('pending-merges', '\n'.join(merges))
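
    # Illustrative sketch, not part of the original module: the leftmost id
    # becomes the basis revision and the rest end up in the 'pending-merges'
    # control file.  The revision ids are hypothetical.
    #
    #     tree.set_parent_ids(['rev-mainline-10', 'rev-feature-3'])
    #     tree.get_parent_ids()   # -> ['rev-mainline-10', 'rev-feature-3']
    #     tree.last_revision()    # -> 'rev-mainline-10'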
714
@needs_tree_write_lock
715
def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
716
"""See MutableTree.set_parent_trees."""
717
# parent trees are not used in current format trees, delegate to
719
self.set_parent_ids([rev for (rev, tree) in parents_list],
720
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
722
@needs_tree_write_lock
723
def set_pending_merges(self, rev_list):
724
parents = self.get_parent_ids()
725
leftmost = parents[:1]
726
new_parents = leftmost + rev_list
727
self.set_parent_ids(new_parents)
729
@needs_tree_write_lock
    def set_merge_modified(self, modified_hashes):
        def iter_stanzas():
            for file_id, hash in modified_hashes.iteritems():
                yield Stanza(file_id=file_id, hash=hash)
        self._put_rio('merge-hashes', iter_stanzas(), MERGE_MODIFIED_HEADER_1)
736
@needs_tree_write_lock
737
def _put_rio(self, filename, stanzas, header):
738
my_file = rio_file(stanzas, header)
739
self._control_files.put(filename, my_file)
741
@needs_write_lock # because merge pulls data into the branch.
742
def merge_from_branch(self, branch, to_revision=None):
743
"""Merge from a branch into this working tree.
745
:param branch: The branch to merge from.
746
:param to_revision: If non-None, the merge will merge to to_revision, but
747
not beyond it. to_revision does not need to be in the history of
748
the branch when it is supplied. If None, to_revision defaults to
749
branch.last_revision().
751
from bzrlib.merge import Merger, Merge3Merger
752
pb = bzrlib.ui.ui_factory.nested_progress_bar()
754
merger = Merger(self.branch, this_tree=self, pb=pb)
755
merger.pp = ProgressPhase("Merge phase", 5, pb)
756
merger.pp.next_phase()
757
# check that there are no
759
merger.check_basis(check_clean=True, require_commits=False)
760
if to_revision is None:
761
to_revision = branch.last_revision()
762
merger.other_rev_id = to_revision
763
if merger.other_rev_id is None:
                raise errors.NoCommits(branch)
765
self.branch.fetch(branch, last_revision=merger.other_rev_id)
766
merger.other_basis = merger.other_rev_id
767
merger.other_tree = self.branch.repository.revision_tree(
769
merger.pp.next_phase()
771
if merger.base_rev_id == merger.other_rev_id:
772
raise errors.PointlessMerge
773
merger.backup_files = False
774
merger.merge_type = Merge3Merger
775
merger.set_interesting_files(None)
776
merger.show_base = False
777
merger.reprocess = False
778
conflicts = merger.do_merge()
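
        # Illustrative sketch, not part of the original module: a typical
        # caller merges the tip of another branch and then inspects the
        # conflict count.  'other_branch' is hypothetical.
        #
        #     conflicts = tree.merge_from_branch(other_branch)
        #     if conflicts:
        #         ...  # leave the user to resolve before committing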
785
def merge_modified(self):
787
hashfile = self._control_files.get('merge-hashes')
792
if hashfile.next() != MERGE_MODIFIED_HEADER_1 + '\n':
793
raise MergeModifiedFormatError()
794
except StopIteration:
795
raise MergeModifiedFormatError()
796
for s in RioReader(hashfile):
797
file_id = s.get("file_id")
798
if file_id not in self.inventory:
801
if hash == self.get_file_sha1(file_id):
802
merge_hashes[file_id] = hash
806
def mkdir(self, path, file_id=None):
807
"""See MutableTree.mkdir()."""
809
file_id = gen_file_id(os.path.basename(path))
810
os.mkdir(self.abspath(path))
811
self.add(path, file_id, 'directory')
814
def get_symlink_target(self, file_id):
815
return os.readlink(self.id2abspath(file_id))
817
def file_class(self, filename):
818
if self.path2id(filename):
820
elif self.is_ignored(filename):
825
def list_files(self):
826
"""Recursively list all files as (path, class, kind, id, entry).
828
Lists, but does not descend into unversioned directories.
830
This does not include files that have been deleted in this
833
Skips the control directory.
835
inv = self._inventory
836
# Convert these into local objects to save lookup times
837
pathjoin = osutils.pathjoin
838
file_kind = osutils.file_kind
840
# transport.base ends in a slash, we want the piece
841
# between the last two slashes
842
transport_base_dir = self.bzrdir.transport.base.rsplit('/', 2)[1]
844
fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
846
# directory file_id, relative path, absolute path, reverse sorted children
847
children = os.listdir(self.basedir)
849
# jam 20060527 The kernel sized tree seems equivalent whether we
850
# use a deque and popleft to keep them sorted, or if we use a plain
851
# list and just reverse() them.
852
children = collections.deque(children)
853
stack = [(inv.root.file_id, u'', self.basedir, children)]
855
from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
858
f = children.popleft()
859
## TODO: If we find a subdirectory with its own .bzr
860
## directory, then that is a separate tree and we
861
## should exclude it.
863
# the bzrdir for this tree
864
if transport_base_dir == f:
867
# we know that from_dir_relpath and from_dir_abspath never end in a slash
868
# and 'f' doesn't begin with one, we can do a string op, rather
869
# than the checks of pathjoin(), all relative paths will have an extra slash
871
fp = from_dir_relpath + '/' + f
874
fap = from_dir_abspath + '/' + f
876
f_ie = inv.get_child(from_dir_id, f)
879
elif self.is_ignored(fp[1:]):
882
# we may not have found this file, because of a unicode issue
883
f_norm, can_access = osutils.normalized_filename(f)
884
if f == f_norm or not can_access:
885
# No change, so treat this file normally
888
# this file can be accessed by a normalized path
889
# check again if it is versioned
890
# these lines are repeated here for performance
892
fp = from_dir_relpath + '/' + f
893
fap = from_dir_abspath + '/' + f
894
f_ie = inv.get_child(from_dir_id, f)
897
elif self.is_ignored(fp[1:]):
906
raise BzrCheckError("file %r entered as kind %r id %r, "
908
% (fap, f_ie.kind, f_ie.file_id, fk))
910
# make a last minute entry
912
yield fp[1:], c, fk, f_ie.file_id, f_ie
915
yield fp[1:], c, fk, None, fk_entries[fk]()
917
yield fp[1:], c, fk, None, TreeEntry()
920
if fk != 'directory':
923
# But do this child first
924
new_children = os.listdir(fap)
926
new_children = collections.deque(new_children)
927
stack.append((f_ie.file_id, fp, fap, new_children))
928
# Break out of inner loop, so that we start outer loop with child
931
# if we finished all children, pop it off the stack
934
@needs_tree_write_lock
935
def move(self, from_paths, to_name):
938
to_name must exist in the inventory.
940
If to_name exists and is a directory, the files are moved into
941
it, keeping their old names.
943
Note that to_name is only the last component of the new name;
944
this doesn't change the directory.
946
This returns a list of (from_path, to_path) pairs for each
950
## TODO: Option to move IDs only
951
assert not isinstance(from_paths, basestring)
953
to_abs = self.abspath(to_name)
954
if not isdir(to_abs):
955
raise BzrError("destination %r is not a directory" % to_abs)
956
if not self.has_filename(to_name):
957
raise BzrError("destination %r not in working directory" % to_abs)
958
to_dir_id = inv.path2id(to_name)
959
if to_dir_id is None and to_name != '':
960
raise BzrError("destination %r is not a versioned directory" % to_name)
961
to_dir_ie = inv[to_dir_id]
962
if to_dir_ie.kind != 'directory':
963
raise BzrError("destination %r is not a directory" % to_abs)
965
to_idpath = inv.get_idpath(to_dir_id)
968
if not self.has_filename(f):
969
raise BzrError("%r does not exist in working tree" % f)
970
f_id = inv.path2id(f)
972
raise BzrError("%r is not versioned" % f)
973
name_tail = splitpath(f)[-1]
974
dest_path = pathjoin(to_name, name_tail)
975
if self.has_filename(dest_path):
976
raise BzrError("destination %r already exists" % dest_path)
977
if f_id in to_idpath:
978
raise BzrError("can't move %r to a subdirectory of itself" % f)
980
# OK, so there's a race here, it's possible that someone will
981
# create a file in this interval and then the rename might be
982
# left half-done. But we should have caught most problems.
983
orig_inv = deepcopy(self.inventory)
986
name_tail = splitpath(f)[-1]
987
dest_path = pathjoin(to_name, name_tail)
988
result.append((f, dest_path))
989
inv.rename(inv.path2id(f), to_dir_id, name_tail)
991
rename(self.abspath(f), self.abspath(dest_path))
993
raise BzrError("failed to rename %r to %r: %s" %
994
(f, dest_path, e[1]),
995
["rename rolled back"])
997
# restore the inventory on error
998
self._set_inventory(orig_inv)
1000
self._write_inventory(inv)
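
    # Illustrative sketch, not part of the original module: move() keeps the
    # old basenames and reports what it did.  The paths are hypothetical and
    # relative to the tree root.
    #
    #     tree.move(['foo.c', 'bar.c'], 'src')
    #     # -> [('foo.c', 'src/foo.c'), ('bar.c', 'src/bar.c')]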
1003
@needs_tree_write_lock
1004
def rename_one(self, from_rel, to_rel):
1007
This can change the directory or the filename or both.
1009
inv = self.inventory
1010
if not self.has_filename(from_rel):
1011
raise BzrError("can't rename: old working file %r does not exist" % from_rel)
1012
if self.has_filename(to_rel):
1013
raise BzrError("can't rename: new working file %r already exists" % to_rel)
1015
file_id = inv.path2id(from_rel)
1017
raise BzrError("can't rename: old name %r is not versioned" % from_rel)
1019
entry = inv[file_id]
1020
from_parent = entry.parent_id
1021
from_name = entry.name
1023
if inv.path2id(to_rel):
1024
raise BzrError("can't rename: new name %r is already versioned" % to_rel)
1026
to_dir, to_tail = os.path.split(to_rel)
1027
to_dir_id = inv.path2id(to_dir)
1028
if to_dir_id is None and to_dir != '':
1029
raise BzrError("can't determine destination directory id for %r" % to_dir)
1031
mutter("rename_one:")
1032
mutter(" file_id {%s}" % file_id)
1033
mutter(" from_rel %r" % from_rel)
1034
mutter(" to_rel %r" % to_rel)
1035
mutter(" to_dir %r" % to_dir)
1036
mutter(" to_dir_id {%s}" % to_dir_id)
1038
inv.rename(file_id, to_dir_id, to_tail)
1040
from_abs = self.abspath(from_rel)
1041
to_abs = self.abspath(to_rel)
1043
rename(from_abs, to_abs)
1045
inv.rename(file_id, from_parent, from_name)
1046
raise BzrError("failed to rename %r to %r: %s"
1047
% (from_abs, to_abs, e[1]),
1048
["rename rolled back"])
1049
self._write_inventory(inv)
1053
"""Return all unknown files.
1055
These are files in the working directory that are not versioned or
1056
control files or ignored.
1058
for subp in self.extras():
1059
if not self.is_ignored(subp):
1062
@needs_tree_write_lock
1063
def unversion(self, file_ids):
1064
"""Remove the file ids in file_ids from the current versioned set.
1066
When a file_id is unversioned, all of its children are automatically
1069
:param file_ids: The file ids to stop versioning.
1070
:raises: NoSuchId if any fileid is not currently versioned.
1072
for file_id in file_ids:
1073
if self._inventory.has_id(file_id):
1074
self._inventory.remove_recursive_id(file_id)
1076
raise errors.NoSuchId(self, file_id)
1078
# in the future this should just set a dirty bit to wait for the
1079
# final unlock. However, until all methods of workingtree start
            # with the current in-memory inventory rather than triggering
1081
# a read, it is more complex - we need to teach read_inventory
1082
# to know when to read, and when to not read first... and possibly
1083
# to save first when the in memory one may be corrupted.
1084
# so for now, we just only write it if it is indeed dirty.
1086
self._write_inventory(self._inventory)
1088
@deprecated_method(zero_eight)
1089
def iter_conflicts(self):
1090
"""List all files in the tree that have text or content conflicts.
1091
DEPRECATED. Use conflicts instead."""
1092
return self._iter_conflicts()
1094
def _iter_conflicts(self):
1096
for info in self.list_files():
1098
stem = get_conflicted_stem(path)
1101
if stem not in conflicted:
1102
conflicted.add(stem)
1106
def pull(self, source, overwrite=False, stop_revision=None):
1107
top_pb = bzrlib.ui.ui_factory.nested_progress_bar()
1110
pp = ProgressPhase("Pull phase", 2, top_pb)
1112
old_revision_history = self.branch.revision_history()
1113
basis_tree = self.basis_tree()
1114
count = self.branch.pull(source, overwrite, stop_revision)
1115
new_revision_history = self.branch.revision_history()
1116
if new_revision_history != old_revision_history:
1118
if len(old_revision_history):
1119
other_revision = old_revision_history[-1]
1121
other_revision = None
1122
repository = self.branch.repository
1123
pb = bzrlib.ui.ui_factory.nested_progress_bar()
1125
new_basis_tree = self.branch.basis_tree()
1126
merge_inner(self.branch,
1133
# TODO - dedup parents list with things merged by pull ?
1134
# reuse the revisiontree we merged against to set the new
1136
parent_trees = [(self.branch.last_revision(), new_basis_tree)]
1137
# we have to pull the merge trees out again, because
1138
# merge_inner has set the ids. - this corner is not yet
1139
# layered well enough to prevent double handling.
1140
merges = self.get_parent_ids()[1:]
1141
parent_trees.extend([
1142
(parent, repository.revision_tree(parent)) for
1144
self.set_parent_trees(parent_trees)
1151
def put_file_bytes_non_atomic(self, file_id, bytes):
1152
"""See MutableTree.put_file_bytes_non_atomic."""
1153
stream = file(self.id2abspath(file_id), 'wb')
1158
# TODO: update the hashcache here ?
1161
"""Yield all unknown files in this WorkingTree.
1163
If there are any unknown directories then only the directory is
1164
returned, not all its children. But if there are unknown files
1165
under a versioned subdirectory, they are returned.
1167
Currently returned depth-first, sorted by name within directories.
1169
## TODO: Work from given directory downwards
1170
for path, dir_entry in self.inventory.directories():
1171
# mutter("search for unknowns in %r", path)
1172
dirabs = self.abspath(path)
1173
if not isdir(dirabs):
1174
# e.g. directory deleted
1178
for subf in os.listdir(dirabs):
1181
if subf not in dir_entry.children:
1182
subf_norm, can_access = osutils.normalized_filename(subf)
1183
if subf_norm != subf and can_access:
1184
if subf_norm not in dir_entry.children:
1185
fl.append(subf_norm)
1191
subp = pathjoin(path, subf)
1194
def _translate_ignore_rule(self, rule):
1195
"""Translate a single ignore rule to a regex.
1197
There are two types of ignore rules. Those that do not contain a / are
1198
matched against the tail of the filename (that is, they do not care
1199
what directory the file is in.) Rules which do contain a slash must
1200
match the entire path. As a special case, './' at the start of the
1201
string counts as a slash in the string but is removed before matching
1202
(e.g. ./foo.c, ./src/foo.c)
1204
:return: The translated regex.
1206
if rule[:2] in ('./', '.\\'):
1208
result = fnmatch.translate(rule[2:])
1209
elif '/' in rule or '\\' in rule:
1211
result = fnmatch.translate(rule)
1213
# default rule style.
1214
result = "(?:.*/)?(?!.*/)" + fnmatch.translate(rule)
1215
assert result[-1] == '$', "fnmatch.translate did not add the expected $"
1216
return "(" + result + ")"
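
    # Illustrative sketch, not part of the original module: roughly how the
    # three rule styles translate (the exact text comes from
    # fnmatch.translate(), so treat these as approximations):
    #
    #     '*.pyc'    -> tail match:   '((?:.*/)?(?!.*/).*\.pyc$)'
    #     './config' -> whole path:   '(config$)'
    #     'src/*.o'  -> whole path:   '(src/.*\.o$)'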
1218
def _combine_ignore_rules(self, rules):
1219
"""Combine a list of ignore rules into a single regex object.
1221
Each individual rule is combined with | to form a big regex, which then
1222
has $ added to it to form something like ()|()|()$. The group index for
1223
each subregex's outermost group is placed in a dictionary mapping back
1224
to the rule. This allows quick identification of the matching rule that
1226
:return: a list of the compiled regex and the matching-group index
1227
dictionaries. We return a list because python complains if you try to
1228
combine more than 100 regexes.
1233
translated_rules = []
1235
translated_rule = self._translate_ignore_rule(rule)
1236
compiled_rule = re.compile(translated_rule)
1237
groups[next_group] = rule
1238
next_group += compiled_rule.groups
1239
translated_rules.append(translated_rule)
1240
if next_group == 99:
1241
result.append((re.compile("|".join(translated_rules)), groups))
1244
translated_rules = []
1245
if len(translated_rules):
1246
result.append((re.compile("|".join(translated_rules)), groups))
1249
def ignored_files(self):
1250
"""Yield list of PATH, IGNORE_PATTERN"""
1251
for subp in self.extras():
1252
pat = self.is_ignored(subp)
1256
def get_ignore_list(self):
1257
"""Return list of ignore patterns.
1259
Cached in the Tree object after the first call.
1261
ignoreset = getattr(self, '_ignoreset', None)
1262
if ignoreset is not None:
1265
ignore_globs = set(bzrlib.DEFAULT_IGNORE)
1266
ignore_globs.update(ignores.get_runtime_ignores())
1268
ignore_globs.update(ignores.get_user_ignores())
1270
if self.has_filename(bzrlib.IGNORE_FILENAME):
1271
f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
1273
ignore_globs.update(ignores.parse_ignore_file(f))
1277
self._ignoreset = ignore_globs
1278
self._ignore_regex = self._combine_ignore_rules(ignore_globs)
1281
def _get_ignore_rules_as_regex(self):
1282
"""Return a regex of the ignore rules and a mapping dict.
1284
:return: (ignore rules compiled regex, dictionary mapping rule group
1285
indices to original rule.)
1287
if getattr(self, '_ignoreset', None) is None:
1288
self.get_ignore_list()
1289
return self._ignore_regex
1291
def is_ignored(self, filename):
1292
r"""Check whether the filename matches an ignore pattern.
1294
Patterns containing '/' or '\' need to match the whole path;
1295
others match against only the last component.
1297
If the file is ignored, returns the pattern which caused it to
1298
be ignored, otherwise None. So this can simply be used as a
1299
boolean if desired."""
1301
# TODO: Use '**' to match directories, and other extended
1302
# globbing stuff from cvs/rsync.
1304
# XXX: fnmatch is actually not quite what we want: it's only
1305
# approximately the same as real Unix fnmatch, and doesn't
1306
# treat dotfiles correctly and allows * to match /.
1307
# Eventually it should be replaced with something more
1310
rules = self._get_ignore_rules_as_regex()
1311
for regex, mapping in rules:
1312
match = regex.match(filename)
1313
if match is not None:
1314
# one or more of the groups in mapping will have a non-None
1316
groups = match.groups()
1317
rules = [mapping[group] for group in
1318
mapping if groups[group] is not None]
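
        # Illustrative sketch, not part of the original module: assuming a
        # typical ignore list containing '*.pyc', callers can use the return
        # value either as the matching pattern or as a boolean:
        #
        #     tree.is_ignored('foo.pyc')     # -> '*.pyc'
        #     tree.is_ignored('src/main.c')  # -> None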
1322
def kind(self, file_id):
1323
return file_kind(self.id2abspath(file_id))
1325
def last_revision(self):
1326
"""Return the last revision of the branch for this tree.
1328
This format tree does not support a separate marker for last-revision
1329
compared to the branch.
1331
See MutableTree.last_revision
1333
return self._last_revision()
1336
def _last_revision(self):
1337
"""helper for get_parent_ids."""
1338
return self.branch.last_revision()
1340
def is_locked(self):
1341
return self._control_files.is_locked()
1343
def lock_read(self):
1344
"""See Branch.lock_read, and WorkingTree.unlock."""
1345
self.branch.lock_read()
1347
return self._control_files.lock_read()
1349
self.branch.unlock()
1352
def lock_tree_write(self):
1353
"""Lock the working tree for write, and the branch for read.
1355
This is useful for operations which only need to mutate the working
1356
tree. Taking out branch write locks is a relatively expensive process
1357
and may fail if the branch is on read only media. So branch write locks
1358
should only be taken out when we are modifying branch data - such as in
1359
operations like commit, pull, uncommit and update.
1361
self.branch.lock_read()
1363
return self._control_files.lock_write()
1365
self.branch.unlock()
1368
def lock_write(self):
1369
"""See MutableTree.lock_write, and WorkingTree.unlock."""
1370
self.branch.lock_write()
1372
return self._control_files.lock_write()
1374
self.branch.unlock()
1377
def get_physical_lock_status(self):
1378
return self._control_files.get_physical_lock_status()
1380
def _basis_inventory_name(self):
1381
return 'basis-inventory-cache'
1383
@needs_tree_write_lock
1384
def set_last_revision(self, new_revision):
1385
"""Change the last revision in the working tree."""
1386
if self._change_last_revision(new_revision):
1387
self._cache_basis_inventory(new_revision)
1389
def _change_last_revision(self, new_revision):
1390
"""Template method part of set_last_revision to perform the change.
1392
This is used to allow WorkingTree3 instances to not affect branch
1393
when their last revision is set.
1395
if new_revision is None:
1396
self.branch.set_revision_history([])
1399
self.branch.generate_revision_history(new_revision)
1400
except errors.NoSuchRevision:
            # not present in the repo - don't try to set it deeper than the tip
1402
self.branch.set_revision_history([new_revision])
1405
def _cache_basis_inventory(self, new_revision):
1406
"""Cache new_revision as the basis inventory."""
1407
# TODO: this should allow the ready-to-use inventory to be passed in,
1408
# as commit already has that ready-to-use [while the format is the
1411
# this double handles the inventory - unpack and repack -
1412
# but is easier to understand. We can/should put a conditional
1413
# in here based on whether the inventory is in the latest format
1414
# - perhaps we should repack all inventories on a repository
1416
# the fast path is to copy the raw xml from the repository. If the
1417
# xml contains 'revision_id="', then we assume the right
1418
# revision_id is set. We must check for this full string, because a
1419
# root node id can legitimately look like 'revision_id' but cannot
1421
xml = self.branch.repository.get_inventory_xml(new_revision)
1422
firstline = xml.split('\n', 1)[0]
1423
if (not 'revision_id="' in firstline or
1424
'format="6"' not in firstline):
1425
inv = self.branch.repository.deserialise_inventory(
1427
inv.revision_id = new_revision
1428
xml = bzrlib.xml6.serializer_v6.write_inventory_to_string(inv)
1429
assert isinstance(xml, str), 'serialised xml must be bytestring.'
1430
path = self._basis_inventory_name()
1432
self._control_files.put(path, sio)
1433
except (errors.NoSuchRevision, errors.RevisionNotPresent):
1436
def read_basis_inventory(self):
1437
"""Read the cached basis inventory."""
1438
path = self._basis_inventory_name()
1439
return self._control_files.get(path).read()
1442
def read_working_inventory(self):
1443
"""Read the working inventory."""
1444
# ElementTree does its own conversion from UTF-8, so open in
1446
result = bzrlib.xml5.serializer_v5.read_inventory(
1447
self._control_files.get('inventory'))
1448
self._set_inventory(result)
1451
@needs_tree_write_lock
    def remove(self, files, verbose=False, to_file=None):
        """Remove nominated files from the working inventory.

        This does not remove their text.  This does not run on XXX on what? RBC

        TODO: Refuse to remove modified files unless --force is given?

        TODO: Do something useful with directories.

        TODO: Should this remove the text or not?  Tough call; not
        removing may be useful and the user can just use rm, and
        is the opposite of add.  Removing it is consistent with most
        other tools.  Maybe an option.
        """
1466
## TODO: Normalize names
1467
## TODO: Remove nested loops; better scalability
1468
if isinstance(files, basestring):
1471
inv = self.inventory
1473
# do this before any modifications
1475
fid = inv.path2id(f)
1477
# TODO: Perhaps make this just a warning, and continue?
1478
# This tends to happen when
1479
raise NotVersionedError(path=f)
                # having removed it, it must be either ignored or unknown
1482
if self.is_ignored(f):
1486
show_status(new_status, inv[fid].kind, f, to_file=to_file)
1489
self._write_inventory(inv)
1491
@needs_tree_write_lock
1492
def revert(self, filenames, old_tree=None, backups=True,
1493
pb=DummyProgress()):
1494
from transform import revert
1495
from conflicts import resolve
1496
if old_tree is None:
1497
old_tree = self.basis_tree()
1498
conflicts = revert(self, old_tree, filenames, backups, pb)
1499
if not len(filenames):
1500
self.set_parent_ids(self.get_parent_ids()[:1])
1503
resolve(self, filenames, ignore_misses=True)
1506
# XXX: This method should be deprecated in favour of taking in a proper
1507
# new Inventory object.
1508
@needs_tree_write_lock
1509
def set_inventory(self, new_inventory_list):
1510
from bzrlib.inventory import (Inventory,
1515
inv = Inventory(self.get_root_id())
1516
for path, file_id, parent, kind in new_inventory_list:
1517
name = os.path.basename(path)
1520
# fixme, there should be a factory function inv,add_??
1521
if kind == 'directory':
1522
inv.add(InventoryDirectory(file_id, name, parent))
1523
elif kind == 'file':
1524
inv.add(InventoryFile(file_id, name, parent))
1525
elif kind == 'symlink':
1526
inv.add(InventoryLink(file_id, name, parent))
1528
raise BzrError("unknown kind %r" % kind)
1529
self._write_inventory(inv)
1531
@needs_tree_write_lock
1532
def set_root_id(self, file_id):
1533
"""Set the root id for this tree."""
1534
inv = self.read_working_inventory()
1535
orig_root_id = inv.root.file_id
1536
del inv._byid[inv.root.file_id]
1537
inv.root.file_id = file_id
1538
inv._byid[inv.root.file_id] = inv.root
1541
if entry.parent_id == orig_root_id:
1542
entry.parent_id = inv.root.file_id
1543
self._write_inventory(inv)
1546
"""See Branch.unlock.
1548
WorkingTree locking just uses the Branch locking facilities.
        This is currently the case because all working trees have an embedded
        branch within them. If, in the future, we were to make branch data
        shareable between multiple working trees, i.e. via shared storage, then
        we would probably want to lock both the local tree and the branch.
1554
raise NotImplementedError(self.unlock)
1558
"""Update a working tree along its branch.
        This will update the branch if it's bound too, which means we have
        multiple trees involved:
1561
The new basis tree of the master.
1562
The old basis tree of the branch.
1563
The old basis tree of the working tree.
1564
The current working tree state.
        Pathologically, all three may be different, and non-ancestors of each other.
1566
Conceptually we want to:
1567
Preserve the wt.basis->wt.state changes
1568
Transform the wt.basis to the new master basis.
1569
Apply a merge of the old branch basis to get any 'local' changes from it into the tree.
1570
Restore the wt.basis->wt.state changes.
1572
There isn't a single operation at the moment to do that, so we:
1573
Merge current state -> basis tree of the master w.r.t. the old tree basis.
1574
Do a 'normal' merge of the old branch basis if it is relevant.
1576
old_tip = self.branch.update()
1577
# here if old_tip is not None, it is the old tip of the branch before
1578
# it was updated from the master branch. This should become a pending
        # merge in the working tree to preserve the user's existing work.  We
        # can't set that until we update the working tree's last revision to be
        # one from the new branch, because it will just get absorbed by the
1582
# parent de-duplication logic.
1584
# We MUST save it even if an error occurs, because otherwise the users
1585
# local work is unreferenced and will appear to have been lost.
1589
last_rev = self.get_parent_ids()[0]
1592
if last_rev != self.branch.last_revision():
1593
# merge tree state up to new branch tip.
1594
basis = self.basis_tree()
1595
to_tree = self.branch.basis_tree()
1596
result += merge_inner(self.branch,
1600
# TODO - dedup parents list with things merged by pull ?
1601
# reuse the tree we've updated to to set the basis:
1602
parent_trees = [(self.branch.last_revision(), to_tree)]
1603
merges = self.get_parent_ids()[1:]
1604
# Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
1606
# lazy initialised tree. dirstate for instance will have the trees
1607
# in ram already, whereas a last-revision + basis-inventory tree
1608
# will not, but also does not need them when setting parents.
1609
for parent in merges:
1610
parent_trees.append(
1611
(parent, self.branch.repository.revision_tree(parent)))
1612
if old_tip is not None:
1613
parent_trees.append(
1614
(old_tip, self.branch.repository.revision_tree(old_tip)))
1615
self.set_parent_trees(parent_trees)
1616
last_rev = parent_trees[0][0]
1618
# the working tree had the same last-revision as the master
1619
# branch did. We may still have pivot local work from the local
1620
# branch into old_tip:
1621
if old_tip is not None:
1622
self.add_parent_tree_id(old_tip)
1623
if old_tip and old_tip != last_rev:
1624
# our last revision was not the prior branch last revision
1625
# and we have converted that last revision to a pending merge.
1626
# base is somewhere between the branch tip now
1627
# and the now pending merge
1628
from bzrlib.revision import common_ancestor
1630
base_rev_id = common_ancestor(self.branch.last_revision(),
1632
self.branch.repository)
1633
except errors.NoCommonAncestor:
1635
base_tree = self.branch.repository.revision_tree(base_rev_id)
1636
other_tree = self.branch.repository.revision_tree(old_tip)
1637
result += merge_inner(self.branch,
1643
@needs_tree_write_lock
1644
def _write_inventory(self, inv):
1645
"""Write inventory as the current inventory."""
1647
bzrlib.xml5.serializer_v5.write_inventory(inv, sio)
1649
self._control_files.put('inventory', sio)
1650
self._set_inventory(inv)
1651
mutter('wrote working inventory')
1653
def set_conflicts(self, arg):
1654
raise UnsupportedOperation(self.set_conflicts, self)
1656
def add_conflicts(self, arg):
1657
raise UnsupportedOperation(self.add_conflicts, self)
1660
def conflicts(self):
1661
conflicts = ConflictList()
1662
for conflicted in self._iter_conflicts():
1665
if file_kind(self.abspath(conflicted)) != "file":
1667
except errors.NoSuchFile:
1670
for suffix in ('.THIS', '.OTHER'):
1672
kind = file_kind(self.abspath(conflicted+suffix))
1675
except errors.NoSuchFile:
1679
ctype = {True: 'text conflict', False: 'contents conflict'}[text]
1680
conflicts.append(Conflict.factory(ctype, path=conflicted,
1681
file_id=self.path2id(conflicted)))
1685
class WorkingTree2(WorkingTree):
1686
"""This is the Format 2 working tree.
1688
This was the first weave based working tree.
1689
- uses os locks for locking.
1690
- uses the branch last-revision.
1693
def lock_tree_write(self):
1694
"""See WorkingTree.lock_tree_write().
1696
In Format2 WorkingTrees we have a single lock for the branch and tree
1697
so lock_tree_write() degrades to lock_write().
1699
self.branch.lock_write()
1701
return self._control_files.lock_write()
1703
self.branch.unlock()
1707
# we share control files:
1708
if self._hashcache.needs_write and self._control_files._lock_count==3:
1709
self._hashcache.write()
1710
# reverse order of locking.
1712
return self._control_files.unlock()
1714
self.branch.unlock()
1717
class WorkingTree3(WorkingTree):
1718
"""This is the Format 3 working tree.
1720
This differs from the base WorkingTree by:
1721
- having its own file lock
1722
- having its own last-revision property.
1724
This is new in bzr 0.8
1728
def _last_revision(self):
1729
"""See Mutable.last_revision."""
1731
return self._control_files.get_utf8('last-revision').read()
1735
def _change_last_revision(self, revision_id):
1736
"""See WorkingTree._change_last_revision."""
1737
if revision_id is None or revision_id == NULL_REVISION:
1739
self._control_files._transport.delete('last-revision')
1740
except errors.NoSuchFile:
1744
self._control_files.put_utf8('last-revision', revision_id)
1747
@needs_tree_write_lock
1748
def set_conflicts(self, conflicts):
1749
self._put_rio('conflicts', conflicts.to_stanzas(),
1752
@needs_tree_write_lock
1753
def add_conflicts(self, new_conflicts):
1754
conflict_set = set(self.conflicts())
1755
conflict_set.update(set(list(new_conflicts)))
1756
self.set_conflicts(ConflictList(sorted(conflict_set,
1757
key=Conflict.sort_key)))
1760
def conflicts(self):
1762
confile = self._control_files.get('conflicts')
1764
return ConflictList()
1766
if confile.next() != CONFLICT_HEADER_1 + '\n':
1767
raise ConflictFormatError()
1768
except StopIteration:
1769
raise ConflictFormatError()
1770
return ConflictList.from_stanzas(RioReader(confile))
1773
if self._hashcache.needs_write and self._control_files._lock_count==1:
1774
self._hashcache.write()
1775
# reverse order of locking.
1777
return self._control_files.unlock()
1779
self.branch.unlock()
1782
def get_conflicted_stem(path):
1783
for suffix in CONFLICT_SUFFIXES:
1784
if path.endswith(suffix):
1785
return path[:-len(suffix)]
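
# Illustrative sketch, not part of the original module: the stem of a
# conflict by-product is the path it was generated from, while other paths
# fall through to None.
def _demo_get_conflicted_stem():
    assert get_conflicted_stem('foo.c.THIS') == 'foo.c'
    assert get_conflicted_stem('foo.c') is None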
1787
@deprecated_function(zero_eight)
1788
def is_control_file(filename):
1789
"""See WorkingTree.is_control_filename(filename)."""
1790
## FIXME: better check
1791
filename = normpath(filename)
1792
while filename != '':
1793
head, tail = os.path.split(filename)
1794
## mutter('check %r for control file' % ((head, tail),))
1797
if filename == head:
1803
class WorkingTreeFormat(object):
1804
"""An encapsulation of the initialization and open routines for a format.
1806
Formats provide three things:
1807
* An initialization routine,
    Formats are placed in a dict by their format string for reference
    during workingtree opening. It's not required that these be instances; they
    can be classes themselves with class methods - it simply depends on
    whether state is needed for a given format or not.
1816
Once a format is deprecated, just deprecate the initialize and open
1817
methods on the format class. Do not deprecate the object, as the
1818
object will be created every time regardless.
1821
_default_format = None
1822
"""The default format used for new trees."""
1825
"""The known formats."""
1828
def find_format(klass, a_bzrdir):
1829
"""Return the format for the working tree object in a_bzrdir."""
1831
transport = a_bzrdir.get_workingtree_transport(None)
1832
format_string = transport.get("format").read()
1833
return klass._formats[format_string]
1835
raise errors.NoWorkingTree(base=transport.base)
1837
raise errors.UnknownFormatError(format=format_string)
1840
def get_default_format(klass):
1841
"""Return the current default format."""
1842
return klass._default_format
1844
def get_format_string(self):
1845
"""Return the ASCII format string that identifies this format."""
1846
raise NotImplementedError(self.get_format_string)
1848
def get_format_description(self):
1849
"""Return the short description for this format."""
1850
raise NotImplementedError(self.get_format_description)
1852
def is_supported(self):
1853
"""Is this format supported?
1855
Supported formats can be initialized and opened.
1856
Unsupported formats may not support initialization or committing or
1857
some other features depending on the reason for not being supported.
1862
def register_format(klass, format):
1863
klass._formats[format.get_format_string()] = format
1866
def set_default_format(klass, format):
1867
klass._default_format = format
1870
def unregister_format(klass, format):
1871
assert klass._formats[format.get_format_string()] is format
1872
del klass._formats[format.get_format_string()]
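
    # Illustrative sketch, not part of the original module: a plugin shipping
    # a format class of its own (the name below is hypothetical) would make it
    # discoverable by find_format() like this:
    #
    #     WorkingTreeFormat.register_format(MyWorkingTreeFormat())
    #     # and optionally:
    #     WorkingTreeFormat.set_default_format(MyWorkingTreeFormat())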
1876
class WorkingTreeFormat2(WorkingTreeFormat):
1877
"""The second working tree format.
1879
This format modified the hash cache from the format 1 hash cache.
1882
def get_format_description(self):
1883
"""See WorkingTreeFormat.get_format_description()."""
1884
return "Working tree format 2"
1886
def stub_initialize_remote(self, control_files):
1887
"""As a special workaround create critical control files for a remote working tree
1889
This ensures that it can later be updated and dealt with locally,
1890
since BzrDirFormat6 and BzrDirFormat5 cannot represent dirs with
1891
no working tree. (See bug #43064).
1895
bzrlib.xml5.serializer_v5.write_inventory(inv, sio)
1897
control_files.put('inventory', sio)
1899
control_files.put_utf8('pending-merges', '')
1902
def initialize(self, a_bzrdir, revision_id=None):
1903
"""See WorkingTreeFormat.initialize()."""
1904
if not isinstance(a_bzrdir.transport, LocalTransport):
1905
raise errors.NotLocalUrl(a_bzrdir.transport.base)
1906
branch = a_bzrdir.open_branch()
1907
if revision_id is not None:
1910
revision_history = branch.revision_history()
1912
position = revision_history.index(revision_id)
1914
raise errors.NoSuchRevision(branch, revision_id)
1915
branch.set_revision_history(revision_history[:position + 1])
1918
revision = branch.last_revision()
1920
wt = WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
1926
wt._write_inventory(inv)
1927
wt.set_root_id(inv.root.file_id)
1928
basis_tree = branch.repository.revision_tree(revision)
1929
wt.set_parent_trees([(revision, basis_tree)])
1930
build_tree(basis_tree, wt)
1934
super(WorkingTreeFormat2, self).__init__()
1935
self._matchingbzrdir = bzrdir.BzrDirFormat6()
1937
def open(self, a_bzrdir, _found=False):
1938
"""Return the WorkingTree object for a_bzrdir
1940
_found is a private parameter, do not use it. It is used to indicate
1941
if format probing has already been done.
1944
# we are being called directly and must probe.
1945
raise NotImplementedError
1946
if not isinstance(a_bzrdir.transport, LocalTransport):
1947
raise errors.NotLocalUrl(a_bzrdir.transport.base)
1948
return WorkingTree2(a_bzrdir.root_transport.local_abspath('.'),
1954
class WorkingTreeFormat3(WorkingTreeFormat):
1955
"""The second working tree format updated to record a format marker.
1958
- exists within a metadir controlling .bzr
1959
- includes an explicit version marker for the workingtree control
1960
files, separate from the BzrDir format
1961
- modifies the hash cache format
1963
- uses a LockDir to guard access for writes.
1966
def get_format_string(self):
1967
"""See WorkingTreeFormat.get_format_string()."""
1968
return "Bazaar-NG Working Tree format 3"
1970
def get_format_description(self):
1971
"""See WorkingTreeFormat.get_format_description()."""
1972
return "Working tree format 3"
1974
_lock_file_name = 'lock'
1975
_lock_class = LockDir
1977
def _open_control_files(self, a_bzrdir):
1978
transport = a_bzrdir.get_workingtree_transport(None)
1979
return LockableFiles(transport, self._lock_file_name,
1982
def initialize(self, a_bzrdir, revision_id=None):
1983
"""See WorkingTreeFormat.initialize().
1985
revision_id allows creating a working tree at a different
1986
revision than the branch is at.
1988
if not isinstance(a_bzrdir.transport, LocalTransport):
1989
raise errors.NotLocalUrl(a_bzrdir.transport.base)
1990
transport = a_bzrdir.get_workingtree_transport(self)
1991
control_files = self._open_control_files(a_bzrdir)
1992
control_files.create_lock()
1993
control_files.lock_write()
1994
control_files.put_utf8('format', self.get_format_string())
1995
branch = a_bzrdir.open_branch()
1996
if revision_id is None:
1997
revision_id = branch.last_revision()
1999
wt = WorkingTree3(a_bzrdir.root_transport.local_abspath('.'),
2005
_control_files=control_files)
2006
wt.lock_tree_write()
2008
wt._write_inventory(inv)
2009
wt.set_root_id(inv.root.file_id)
2010
basis_tree = branch.repository.revision_tree(revision_id)
2011
if revision_id == bzrlib.revision.NULL_REVISION:
2012
wt.set_parent_trees([])
2014
wt.set_parent_trees([(revision_id, basis_tree)])
2015
build_tree(basis_tree, wt)
2018
control_files.unlock()
2022
super(WorkingTreeFormat3, self).__init__()
2023
self._matchingbzrdir = bzrdir.BzrDirMetaFormat1()
2025
def open(self, a_bzrdir, _found=False):
2026
"""Return the WorkingTree object for a_bzrdir
2028
_found is a private parameter, do not use it. It is used to indicate
2029
if format probing has already been done.
2032
# we are being called directly and must probe.
2033
raise NotImplementedError
2034
if not isinstance(a_bzrdir.transport, LocalTransport):
2035
raise errors.NotLocalUrl(a_bzrdir.transport.base)
2036
return self._open(a_bzrdir, self._open_control_files(a_bzrdir))
2038
def _open(self, a_bzrdir, control_files):
2039
"""Open the tree itself.
2041
:param a_bzrdir: the dir for the tree.
2042
:param control_files: the control files for the tree.
2044
return WorkingTree3(a_bzrdir.root_transport.local_abspath('.'),
2048
_control_files=control_files)
2051
return self.get_format_string()
2054
# formats which have no format string are not discoverable
2055
# and not independently creatable, so are not registered.
2056
__default_format = WorkingTreeFormat3()
2057
WorkingTreeFormat.register_format(__default_format)
2058
WorkingTreeFormat.set_default_format(__default_format)
2059
_legacy_formats = [WorkingTreeFormat2(),
2063
class WorkingTreeTestProviderAdapter(object):
2064
"""A tool to generate a suite testing multiple workingtree formats at once.
2066
This is done by copying the test once for each transport and injecting
2067
the transport_server, transport_readonly_server, and workingtree_format
2068
classes into each copy. Each copy is also given a new id() to make it
2072
def __init__(self, transport_server, transport_readonly_server, formats):
2073
self._transport_server = transport_server
2074
self._transport_readonly_server = transport_readonly_server
2075
self._formats = formats
2077
def _clone_test(self, test, bzrdir_format, workingtree_format, variation):
        """Clone test for adaptation."""
2079
new_test = deepcopy(test)
2080
new_test.transport_server = self._transport_server
2081
new_test.transport_readonly_server = self._transport_readonly_server
2082
new_test.bzrdir_format = bzrdir_format
2083
new_test.workingtree_format = workingtree_format
2084
def make_new_test_id():
2085
new_id = "%s(%s)" % (test.id(), variation)
2086
return lambda: new_id
2087
new_test.id = make_new_test_id()
2090
def adapt(self, test):
2091
from bzrlib.tests import TestSuite
2092
result = TestSuite()
2093
for workingtree_format, bzrdir_format in self._formats:
2094
new_test = self._clone_test(
2097
workingtree_format, workingtree_format.__class__.__name__)
2098
result.addTest(new_test)