# Copyright (C) 2005-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""InventoryWorkingTree object and friends.

A WorkingTree represents the editable working copy of a branch.
Operations which represent the WorkingTree are also done here,
such as renaming or adding files. The WorkingTree has an inventory
which is updated by these operations. A commit produces a
new revision based on the workingtree and its inventory.

At the moment every WorkingTree has its own branch. Remote
WorkingTrees aren't supported.

To get a WorkingTree, call bzrdir.open_workingtree() or
WorkingTree.open(dir).
"""
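# A minimal usage sketch (added as an illustration, not part of the original
# module): how a caller might obtain and inspect a working tree. The path
# 'path/to/tree' is purely illustrative.
#
#     from breezy.workingtree import WorkingTree
#
#     wt = WorkingTree.open('path/to/tree')
#     with wt.lock_read():
#         print(wt.basedir)
#         print(wt.get_parent_ids())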
from __future__ import absolute_import

from bisect import bisect_left

# Explicitly import breezy.bzrdir so that the BzrProber
# is guaranteed to be registered.
from .. import lazy_import
lazy_import.lazy_import(globals(), """
conflicts as _mod_conflicts,
revision as _mod_revision,
from breezy.bzr import (
from ..lock import LogicalLockResult
from .inventorytree import InventoryRevisionTree, MutableInventoryTree
from ..sixish import (
from ..trace import mutter, note
from ..workingtree import (

MERGE_MODIFIED_HEADER_1 = b"BZR merge-modified list format 1"

# TODO: Modifying the conflict objects or their type is currently nearly
# impossible as there is no clear relationship between the working tree format
# and the conflict list file format.
CONFLICT_HEADER_1 = b"BZR conflict list format 1"
class InventoryWorkingTree(WorkingTree, MutableInventoryTree):
"""Base class for working trees that are inventory-oriented.

The inventory is held in the `Branch` working-inventory, and the
files are in a directory on disk.

It is possible for a `WorkingTree` to have a filename which is
not listed in the Inventory and vice versa.
"""

def __init__(self, basedir='.',
"""Construct an InventoryWorkingTree instance. This is not a public API.

:param branch: A branch to override probing for the branch.
"""
super(InventoryWorkingTree, self).__init__(
basedir=basedir, branch=branch,
_transport=_control_files._transport, _internal=_internal,
_format=_format, _controldir=_controldir)

self._control_files = _control_files
self._detect_case_handling()

if _inventory is None:
# This will be acquired on lock_read() or lock_write()
self._inventory_is_modified = False
self._inventory = None
else:
# the caller of __init__ has provided an inventory,
# we assume they know what they are doing - as it's only
# the Format factory and creation methods that are
# permitted to do this.
self._set_inventory(_inventory, dirty=False)
def _set_inventory(self, inv, dirty):
"""Set the internal cached inventory.

:param inv: The inventory to set.
:param dirty: A boolean indicating whether the inventory is the same
logical inventory as what's on disk. If True the inventory is not
the same and should be written to disk or data will be lost, if
False then the inventory is the same as that on disk and any
serialisation would be unneeded overhead.
"""
self._inventory = inv
self._inventory_is_modified = dirty
def _detect_case_handling(self):
wt_trans = self.controldir.get_workingtree_transport(None)
try:
wt_trans.stat(self._format.case_sensitive_filename)
except errors.NoSuchFile:
self.case_sensitive = True
else:
self.case_sensitive = False

self._setup_directory_is_tree_reference()
def _serialize(self, inventory, out_file):
xml5.serializer_v5.write_inventory(
self._inventory, out_file, working=True)

def _deserialize(self, in_file):
return xml5.serializer_v5.read_inventory(in_file)
169
def break_lock(self):
170
"""Break a lock if one is present from another instance.
172
Uses the ui factory to ask for confirmation if the lock may be from
175
This will probe the repository for its lock as well.
177
self._control_files.break_lock()
178
self.branch.break_lock()
181
return self._control_files.is_locked()
183
def _must_be_locked(self):
184
if not self.is_locked():
185
raise errors.ObjectNotLocked(self)
188
"""Lock the tree for reading.
190
This also locks the branch, and can be unlocked via self.unlock().
192
:return: A breezy.lock.LogicalLockResult.
194
if not self.is_locked():
196
self.branch.lock_read()
198
self._control_files.lock_read()
199
return LogicalLockResult(self.unlock)
200
except BaseException:
204
def lock_tree_write(self):
205
"""See MutableTree.lock_tree_write, and WorkingTree.unlock.
207
:return: A breezy.lock.LogicalLockResult.
209
if not self.is_locked():
211
self.branch.lock_read()
213
self._control_files.lock_write()
214
return LogicalLockResult(self.unlock)
215
except BaseException:
219
def lock_write(self):
220
"""See MutableTree.lock_write, and WorkingTree.unlock.
222
:return: A breezy.lock.LogicalLockResult.
224
if not self.is_locked():
226
self.branch.lock_write()
228
self._control_files.lock_write()
229
return LogicalLockResult(self.unlock)
230
except BaseException:
234
def get_physical_lock_status(self):
235
return self._control_files.get_physical_lock_status()
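# Illustrative sketch (an assumption about typical call patterns, not taken
# from this module): the lock methods above return a
# breezy.lock.LogicalLockResult, so callers can either use the result as a
# context manager or pair it with an explicit unlock().
#
#     with tree.lock_write():
#         pass  # mutate the tree here
#
#     lock = tree.lock_read()
#     try:
#         pass  # read-only operations here
#     finally:
#         lock.unlock()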
237
def _write_inventory(self, inv):
238
"""Write inventory as the current inventory."""
239
with self.lock_tree_write():
240
self._set_inventory(inv, dirty=True)
243
# XXX: This method should be deprecated in favour of taking in a proper
244
# new Inventory object.
245
def set_inventory(self, new_inventory_list):
246
from .inventory import (
251
with self.lock_tree_write():
252
inv = Inventory(self.get_root_id())
253
for path, file_id, parent, kind in new_inventory_list:
254
name = os.path.basename(path)
# fixme, there should be a factory function inv.add_??
if kind == 'directory':
inv.add(InventoryDirectory(file_id, name, parent))
elif kind == 'file':
inv.add(InventoryFile(file_id, name, parent))
elif kind == 'symlink':
inv.add(InventoryLink(file_id, name, parent))
else:
raise errors.BzrError("unknown kind %r" % kind)
266
self._write_inventory(inv)
268
def _write_basis_inventory(self, xml):
269
"""Write the basis inventory XML to the basis-inventory file"""
270
path = self._basis_inventory_name()
272
self._transport.put_file(path, sio,
273
mode=self.controldir._get_file_mode())
275
def _reset_data(self):
276
"""Reset transient data that cannot be revalidated."""
277
self._inventory_is_modified = False
278
with self._transport.get('inventory') as f:
279
result = self._deserialize(f)
280
self._set_inventory(result, dirty=False)
282
def store_uncommitted(self):
283
"""Store uncommitted changes from the tree in the branch."""
284
with self.lock_write():
285
target_tree = self.basis_tree()
286
from ..shelf import ShelfCreator
287
shelf_creator = ShelfCreator(self, target_tree)
289
if not shelf_creator.shelve_all():
291
self.branch.store_uncommitted(shelf_creator)
292
shelf_creator.transform()
294
shelf_creator.finalize()
295
note('Uncommitted changes stored in branch "%s".',
298
def restore_uncommitted(self):
299
"""Restore uncommitted changes from the branch into the tree."""
300
with self.lock_write():
301
unshelver = self.branch.get_unshelver(self)
302
if unshelver is None:
305
merger = unshelver.make_merger()
306
merger.ignore_zero = True
308
self.branch.store_uncommitted(None)
312
def get_shelf_manager(self):
313
"""Return the ShelfManager for this WorkingTree."""
314
from ..shelf import ShelfManager
315
return ShelfManager(self, self._transport)
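# Illustrative round trip for store_uncommitted()/restore_uncommitted() above
# (an assumption about typical usage, not taken from this module's tests):
#
#     tree.store_uncommitted()     # shelve local changes into the branch
#     # ... e.g. update or pull the tree ...
#     tree.restore_uncommitted()   # bring the shelved changes back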
317
def _set_root_id(self, file_id):
318
"""Set the root id for this tree, in a format specific manner.
320
:param file_id: The file id to assign to the root. It must not be
321
present in the current inventory or an error will occur. It must
322
not be None, but rather a valid file id.
324
inv = self._inventory
325
orig_root_id = inv.root.file_id
326
# TODO: it might be nice to exit early if there was nothing
# to do, saving us from triggering a sync on unlock.
self._inventory_is_modified = True
# we preserve the root inventory entry object, but
# unlink it from the byid index
331
inv.delete(inv.root.file_id)
332
inv.root.file_id = file_id
333
# and link it into the index with the new changed id.
334
inv._byid[inv.root.file_id] = inv.root
335
# and finally update all children to reference the new id.
336
# XXX: this should be safe to just look at the root.children
337
# list, not the WHOLE INVENTORY.
338
for fid in inv.iter_all_ids():
339
entry = inv.get_entry(fid)
340
if entry.parent_id == orig_root_id:
341
entry.parent_id = inv.root.file_id
343
def remove(self, files, verbose=False, to_file=None, keep_files=True,
345
"""Remove nominated files from the working tree metadata.
347
:files: File paths relative to the basedir.
348
:keep_files: If true, the files will also be kept.
349
:force: Delete files and directories, even if they are changed and
350
even if the directories are not empty.
352
if isinstance(files, (str, text_type)):
357
all_files = set() # specified and nested files
363
def recurse_directory_to_add_files(directory):
364
# Recurse directory and add all files
365
# so we can check if they have changed.
366
for parent_info, file_infos in self.walkdirs(directory):
367
for relpath, basename, kind, lstat, fileid, kind in file_infos:
368
# Is it versioned or ignored?
369
if self.is_versioned(relpath):
370
# Add nested content for deletion.
371
all_files.add(relpath)
373
# Files which are not versioned
374
# should be treated as unknown.
375
files_to_backup.append(relpath)
377
with self.lock_tree_write():
379
for filename in files:
380
# Get file name into canonical form.
381
abspath = self.abspath(filename)
382
filename = self.relpath(abspath)
383
if len(filename) > 0:
384
all_files.add(filename)
385
recurse_directory_to_add_files(filename)
387
files = list(all_files)
390
return # nothing to do
392
# Sort needed to first handle directory content before the
394
files.sort(reverse=True)
396
# Bail out if we are going to delete files we shouldn't
397
if not keep_files and not force:
398
for (file_id, path, content_change, versioned, parent_id, name,
399
kind, executable) in self.iter_changes(
400
self.basis_tree(), include_unchanged=True,
401
require_versioned=False, want_unversioned=True,
402
specific_files=files):
403
if versioned[0] is False:
404
# The record is unknown or newly added
405
files_to_backup.append(path[1])
406
elif (content_change and (kind[1] is not None)
407
and osutils.is_inside_any(files, path[1])):
408
# Versioned and changed, but not deleted, and still
409
# in one of the dirs to be deleted.
410
files_to_backup.append(path[1])
412
def backup(file_to_backup):
413
backup_name = self.controldir._available_backup_name(
415
osutils.rename(abs_path, self.abspath(backup_name))
416
return "removed %s (but kept a copy: %s)" % (file_to_backup,
419
# Build inv_delta and delete files where applicable,
420
# do this before any modifications to meta data.
422
fid = self.path2id(f)
425
message = "%s is not versioned." % (f,)
428
# having removed it, it must be either ignored or
430
if self.is_ignored(f):
434
# XXX: Really should be a more abstract reporter
436
kind_ch = osutils.kind_marker(self.kind(f))
438
new_status + ' ' + f + kind_ch + '\n')
440
inv_delta.append((f, None, fid, None))
441
message = "removed %s" % (f,)
444
abs_path = self.abspath(f)
445
if osutils.lexists(abs_path):
446
if (osutils.isdir(abs_path)
447
and len(os.listdir(abs_path)) > 0):
449
osutils.rmtree(abs_path)
450
message = "deleted %s" % (f,)
454
if f in files_to_backup:
457
osutils.delete_any(abs_path)
458
message = "deleted %s" % (f,)
459
elif message is not None:
460
# Only care if we haven't done anything yet.
461
message = "%s does not exist." % (f,)
463
# Print only one message (if any) per file.
464
if message is not None:
466
self.apply_inventory_delta(inv_delta)
468
def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
469
"""See MutableTree.set_parent_trees."""
470
parent_ids = [rev for (rev, tree) in parents_list]
471
for revision_id in parent_ids:
472
_mod_revision.check_not_reserved_id(revision_id)
474
with self.lock_tree_write():
475
self._check_parents_for_ghosts(parent_ids,
476
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
478
parent_ids = self._filter_parent_ids_by_ancestry(parent_ids)
480
if len(parent_ids) == 0:
481
leftmost_parent_id = _mod_revision.NULL_REVISION
482
leftmost_parent_tree = None
484
leftmost_parent_id, leftmost_parent_tree = parents_list[0]
486
if self._change_last_revision(leftmost_parent_id):
487
if leftmost_parent_tree is None:
488
# If we don't have a tree, fall back to reading the
489
# parent tree from the repository.
490
self._cache_basis_inventory(leftmost_parent_id)
492
inv = leftmost_parent_tree.root_inventory
493
xml = self._create_basis_xml_from_inventory(
494
leftmost_parent_id, inv)
495
self._write_basis_inventory(xml)
496
self._set_merges_from_parent_ids(parent_ids)
498
def _cache_basis_inventory(self, new_revision):
499
"""Cache new_revision as the basis inventory."""
500
# TODO: this should allow the ready-to-use inventory to be passed in,
501
# as commit already has that ready-to-use [while the format is the
504
# this double handles the inventory - unpack and repack -
505
# but is easier to understand. We can/should put a conditional
506
# in here based on whether the inventory is in the latest format
507
# - perhaps we should repack all inventories on a repository
509
# the fast path is to copy the raw xml from the repository. If the
510
# xml contains 'revision_id="', then we assume the right
511
# revision_id is set. We must check for this full string, because a
512
# root node id can legitimately look like 'revision_id' but cannot
514
xml = self.branch.repository._get_inventory_xml(new_revision)
515
firstline = xml.split(b'\n', 1)[0]
516
if (b'revision_id="' not in firstline
517
or b'format="7"' not in firstline):
518
inv = self.branch.repository._serializer.read_inventory_from_string(
520
xml = self._create_basis_xml_from_inventory(new_revision, inv)
521
self._write_basis_inventory(xml)
522
except (errors.NoSuchRevision, errors.RevisionNotPresent):
525
def _basis_inventory_name(self):
526
return 'basis-inventory-cache'
528
def _create_basis_xml_from_inventory(self, revision_id, inventory):
529
"""Create the text that will be saved in basis-inventory"""
530
inventory.revision_id = revision_id
531
return xml7.serializer_v7.write_inventory_to_string(inventory)
533
def set_conflicts(self, conflicts):
534
with self.lock_tree_write():
535
self._put_rio('conflicts', conflicts.to_stanzas(),
538
def add_conflicts(self, new_conflicts):
539
with self.lock_tree_write():
540
conflict_set = set(self.conflicts())
541
conflict_set.update(set(list(new_conflicts)))
542
self.set_conflicts(_mod_conflicts.ConflictList(
543
sorted(conflict_set, key=_mod_conflicts.Conflict.sort_key)))
546
with self.lock_read():
548
confile = self._transport.get('conflicts')
549
except errors.NoSuchFile:
550
return _mod_conflicts.ConflictList()
553
if next(confile) != CONFLICT_HEADER_1 + b'\n':
554
raise errors.ConflictFormatError()
555
except StopIteration:
556
raise errors.ConflictFormatError()
557
reader = _mod_rio.RioReader(confile)
558
return _mod_conflicts.ConflictList.from_stanzas(reader)
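# Usage sketch for the conflict accessors above (hypothetical; attribute names
# follow breezy.conflicts.Conflict):
#
#     for conflict in tree.conflicts():
#         print(conflict.typestring, conflict.path)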
562
def get_ignore_list(self):
563
"""Return list of ignore patterns.
565
Cached in the Tree object after the first call.
567
ignoreset = getattr(self, '_ignoreset', None)
568
if ignoreset is not None:
572
ignore_globs.update(ignores.get_runtime_ignores())
573
ignore_globs.update(ignores.get_user_ignores())
574
if self.has_filename(self._format.ignore_filename):
575
with self.get_file(self._format.ignore_filename) as f:
576
ignore_globs.update(ignores.parse_ignore_file(f))
577
self._ignoreset = ignore_globs
581
self._flush_ignore_list_cache()
583
def _flush_ignore_list_cache(self):
584
"""Resets the cached ignore list to force a cache rebuild."""
585
self._ignoreset = None
586
self._ignoreglobster = None
588
def is_ignored(self, filename):
589
r"""Check whether the filename matches an ignore pattern.
591
Patterns containing '/' or '\' need to match the whole path;
592
others match against only the last component. Patterns starting
593
with '!' are ignore exceptions. Exceptions take precedence
594
over regular patterns and cause the filename to not be ignored.
596
If the file is ignored, returns the pattern which caused it to
597
be ignored, otherwise None. So this can simply be used as a
598
boolean if desired."""
599
if getattr(self, '_ignoreglobster', None) is None:
600
self._ignoreglobster = globbing.ExceptionGlobster(
601
self.get_ignore_list())
602
return self._ignoreglobster.match(filename)
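# Examples of the matching rules described above (hypothetical patterns; the
# real list comes from .bzrignore plus the user and runtime ignores):
#
#     tree.is_ignored('foo.pyc')        # -> '*.pyc'  (basename match)
#     tree.is_ignored('doc/api.txt')    # -> 'doc/*'  (pattern with '/': whole path)
#     tree.is_ignored('README')         # -> None     (not ignored)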
604
def read_basis_inventory(self):
605
"""Read the cached basis inventory."""
606
path = self._basis_inventory_name()
607
return self._transport.get_bytes(path)
609
def read_working_inventory(self):
610
"""Read the working inventory.
612
:raises errors.InventoryModified: read_working_inventory will fail
613
when the current in memory inventory has been modified.
615
# conceptually this should be an implementation detail of the tree.
616
# XXX: Deprecate this.
617
# ElementTree does its own conversion from UTF-8, so open in
619
with self.lock_read():
620
if self._inventory_is_modified:
621
raise errors.InventoryModified(self)
622
with self._transport.get('inventory') as f:
623
result = self._deserialize(f)
624
self._set_inventory(result, dirty=False)
627
def get_root_id(self):
628
"""Return the id of this trees root"""
629
with self.lock_read():
630
return self._inventory.root.file_id
632
def has_id(self, file_id):
633
# files that have been deleted are excluded
634
inv, inv_file_id = self._unpack_file_id(file_id)
635
if not inv.has_id(inv_file_id):
637
path = inv.id2path(inv_file_id)
638
return osutils.lexists(self.abspath(path))
640
def has_or_had_id(self, file_id):
641
if file_id == self.get_root_id():
643
inv, inv_file_id = self._unpack_file_id(file_id)
644
return inv.has_id(inv_file_id)
646
def all_file_ids(self):
647
"""Iterate through file_ids for this tree.
649
file_ids are in a WorkingTree if they are in the working inventory
650
and the working file exists.
652
return {ie.file_id for path, ie in self.iter_entries_by_dir()}
654
def all_versioned_paths(self):
655
return {path for path, ie in self.iter_entries_by_dir()}
657
def set_last_revision(self, new_revision):
658
"""Change the last revision in the working tree."""
659
with self.lock_tree_write():
660
if self._change_last_revision(new_revision):
661
self._cache_basis_inventory(new_revision)
663
def _get_check_refs(self):
664
"""Return the references needed to perform a check of this tree.
666
The default implementation returns no refs, and is only suitable for
667
trees that have no local caching and can commit on ghosts at any time.
669
:seealso: breezy.check for details about check_refs.
673
def _check(self, references):
674
"""Check the tree for consistency.
676
:param references: A dict with keys matching the items returned by
677
self._get_check_refs(), and values from looking those keys up in
680
with self.lock_read():
681
tree_basis = self.basis_tree()
682
with tree_basis.lock_read():
683
repo_basis = references[('trees', self.last_revision())]
684
if len(list(repo_basis.iter_changes(tree_basis))) > 0:
685
raise errors.BzrCheckError(
686
"Mismatched basis inventory content.")
689
def check_state(self):
690
"""Check that the working state is/isn't valid."""
691
with self.lock_read():
692
check_refs = self._get_check_refs()
694
for ref in check_refs:
697
refs[ref] = self.branch.repository.revision_tree(value)
700
def reset_state(self, revision_ids=None):
701
"""Reset the state of the working tree.
703
This does a hard-reset to a last-known-good state. This is a way to
704
fix if something got corrupted (like the .bzr/checkout/dirstate file)
706
with self.lock_tree_write():
707
if revision_ids is None:
708
revision_ids = self.get_parent_ids()
710
rt = self.branch.repository.revision_tree(
711
_mod_revision.NULL_REVISION)
713
rt = self.branch.repository.revision_tree(revision_ids[0])
714
self._write_inventory(rt.root_inventory)
715
self.set_parent_ids(revision_ids)
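# Usage sketch (an assumption, not from the original source): reset_state()
# performs a hard reset back to a known-good state, e.g. after the
# .bzr/checkout state became corrupted.
#
#     tree.reset_state()                  # reset to the current parent ids
#     tree.reset_state([some_revision])   # or reset explicitly to one revision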
718
"""Write the in memory inventory to disk."""
719
# TODO: Maybe this should only write on dirty ?
720
if self._control_files._lock_mode != 'w':
721
raise errors.NotWriteLocked(self)
723
self._serialize(self._inventory, sio)
725
self._transport.put_file('inventory', sio,
726
mode=self.controldir._get_file_mode())
727
self._inventory_is_modified = False
729
def get_file_mtime(self, path):
730
"""See Tree.get_file_mtime."""
732
return os.lstat(self.abspath(path)).st_mtime
734
if e.errno == errno.ENOENT:
735
raise errors.NoSuchFile(path)
738
def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
740
return self._path2ie(path).executable
741
except errors.NoSuchFile:
742
# For unversioned files on win32, we just assume they are not
746
def _is_executable_from_path_and_stat_from_stat(self, path, stat_result):
747
mode = stat_result.st_mode
748
return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
750
def is_executable(self, path):
751
if not self._supports_executable():
752
ie = self._path2ie(path)
755
mode = os.lstat(self.abspath(path)).st_mode
756
return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
758
def _is_executable_from_path_and_stat(self, path, stat_result):
759
if not self._supports_executable():
760
return self._is_executable_from_path_and_stat_from_basis(
763
return self._is_executable_from_path_and_stat_from_stat(
766
def _add(self, files, ids, kinds):
767
"""See MutableTree._add."""
768
with self.lock_tree_write():
769
# TODO: Re-adding a file that is removed in the working copy
770
# should probably put it back with the previous ID.
771
# the read and write working inventory should not occur in this
772
# function - they should be part of lock_write and unlock.
773
# FIXME: nested trees
774
inv = self.root_inventory
775
for f, file_id, kind in zip(files, ids, kinds):
777
inv.add_path(f, kind=kind)
779
inv.add_path(f, kind=kind, file_id=file_id)
780
self._inventory_is_modified = True
782
def revision_tree(self, revision_id):
783
"""See WorkingTree.revision_id."""
784
if revision_id == self.last_revision():
786
xml = self.read_basis_inventory()
787
except errors.NoSuchFile:
791
inv = xml7.serializer_v7.read_inventory_from_string(xml)
792
# don't use the repository revision_tree api because we want
793
# to supply the inventory.
794
if inv.revision_id == revision_id:
795
return InventoryRevisionTree(
796
self.branch.repository, inv, revision_id)
797
except errors.BadInventoryFormat:
799
# raise if there was no inventory, or if we read the wrong inventory.
800
raise errors.NoSuchRevisionInTree(self, revision_id)
802
def annotate_iter(self, path,
803
default_revision=_mod_revision.CURRENT_REVISION):
804
"""See Tree.annotate_iter
806
This implementation will use the basis tree implementation if possible.
807
Lines not in the basis are attributed to CURRENT_REVISION
809
If there are pending merges, lines added by those merges will be
810
incorrectly attributed to CURRENT_REVISION (but after committing, the
811
attribution will be correct).
813
with self.lock_read():
814
file_id = self.path2id(path)
816
raise errors.NoSuchFile(path)
817
maybe_file_parent_keys = []
818
for parent_id in self.get_parent_ids():
820
parent_tree = self.revision_tree(parent_id)
821
except errors.NoSuchRevisionInTree:
822
parent_tree = self.branch.repository.revision_tree(
824
with parent_tree.lock_read():
827
kind = parent_tree.kind(path)
828
except errors.NoSuchFile:
831
# Note: this is slightly unnecessary, because symlinks
832
# and directories have a "text" which is the empty
833
# text, and we know that won't mess up annotations. But
836
parent_path = parent_tree.id2path(file_id)
839
parent_tree.get_file_revision(parent_path))
840
if parent_text_key not in maybe_file_parent_keys:
841
maybe_file_parent_keys.append(parent_text_key)
842
graph = self.branch.repository.get_file_graph()
843
heads = graph.heads(maybe_file_parent_keys)
844
file_parent_keys = []
845
for key in maybe_file_parent_keys:
847
file_parent_keys.append(key)
849
# Now we have the parents of this content
850
annotator = self.branch.repository.texts.get_annotator()
851
text = self.get_file_text(path)
852
this_key = (file_id, default_revision)
853
annotator.add_special_text(this_key, file_parent_keys, text)
854
annotations = [(key[-1], line)
855
for key, line in annotator.annotate_flat(this_key)]
858
def _put_rio(self, filename, stanzas, header):
859
self._must_be_locked()
860
my_file = _mod_rio.rio_file(stanzas, header)
861
self._transport.put_file(filename, my_file,
862
mode=self.controldir._get_file_mode())
864
def set_merge_modified(self, modified_hashes):
866
for file_id in modified_hashes:
867
yield _mod_rio.Stanza(file_id=file_id.decode('utf8'),
868
hash=modified_hashes[file_id])
869
with self.lock_tree_write():
870
self._put_rio('merge-hashes', iter_stanzas(),
871
MERGE_MODIFIED_HEADER_1)
873
def merge_modified(self):
874
"""Return a dictionary of files modified by a merge.
876
The list is initialized by WorkingTree.set_merge_modified, which is
877
typically called after we make some automatic updates to the tree
880
This returns a map of file_id->sha1, containing only files which are
881
still in the working inventory and have that text hash.
883
with self.lock_read():
885
hashfile = self._transport.get('merge-hashes')
886
except errors.NoSuchFile:
891
if next(hashfile) != MERGE_MODIFIED_HEADER_1 + b'\n':
892
raise errors.MergeModifiedFormatError()
893
except StopIteration:
894
raise errors.MergeModifiedFormatError()
895
for s in _mod_rio.RioReader(hashfile):
896
# RioReader reads in Unicode, so convert file_ids back to
898
file_id = cache_utf8.encode(s.get("file_id"))
899
if not self.has_id(file_id):
901
text_hash = s.get("hash").encode('ascii')
902
path = self.id2path(file_id)
903
if text_hash == self.get_file_sha1(path):
904
merge_hashes[file_id] = text_hash
909
def subsume(self, other_tree):
910
def add_children(inventory, entry):
911
for child_entry in entry.children.values():
912
inventory._byid[child_entry.file_id] = child_entry
913
if child_entry.kind == 'directory':
914
add_children(inventory, child_entry)
915
with self.lock_write():
916
if other_tree.get_root_id() == self.get_root_id():
917
raise errors.BadSubsumeSource(self, other_tree,
918
'Trees have the same root')
920
other_tree_path = self.relpath(other_tree.basedir)
921
except errors.PathNotChild:
922
raise errors.BadSubsumeSource(
923
self, other_tree, 'Tree is not contained by the other')
924
new_root_parent = self.path2id(osutils.dirname(other_tree_path))
925
if new_root_parent is None:
926
raise errors.BadSubsumeSource(
927
self, other_tree, 'Parent directory is not versioned.')
928
# We need to ensure that the result of a fetch will have a
929
# versionedfile for the other_tree root, and only fetching into
930
# RepositoryKnit2 guarantees that.
931
if not self.branch.repository.supports_rich_root():
932
raise errors.SubsumeTargetNeedsUpgrade(other_tree)
933
with other_tree.lock_tree_write():
934
other_root = other_tree.root_inventory.root
935
other_root.parent_id = new_root_parent
936
other_root.name = osutils.basename(other_tree_path)
937
self.root_inventory.add(other_root)
938
add_children(self.root_inventory, other_root)
939
self._write_inventory(self.root_inventory)
940
# normally we don't want to fetch whole repositories, but i
941
# think here we really do want to consolidate the whole thing.
942
for parent_id in other_tree.get_parent_ids():
943
self.branch.fetch(other_tree.branch, parent_id)
944
self.add_parent_tree_id(parent_id)
945
other_tree.controldir.retire_bzrdir()
947
def extract(self, sub_path, format=None):
948
"""Extract a subtree from this tree.
950
A new branch will be created, relative to the path for this tree.
953
segments = osutils.splitpath(path)
954
transport = self.branch.controldir.root_transport
955
for name in segments:
956
transport = transport.clone(name)
957
transport.ensure_base()
960
with self.lock_tree_write():
962
branch_transport = mkdirs(sub_path)
964
format = self.controldir.cloning_metadir()
965
branch_transport.ensure_base()
966
branch_bzrdir = format.initialize_on_transport(branch_transport)
968
repo = branch_bzrdir.find_repository()
969
except errors.NoRepositoryPresent:
970
repo = branch_bzrdir.create_repository()
971
if not repo.supports_rich_root():
972
raise errors.RootNotRich()
973
new_branch = branch_bzrdir.create_branch()
974
new_branch.pull(self.branch)
975
for parent_id in self.get_parent_ids():
976
new_branch.fetch(self.branch, parent_id)
977
tree_transport = self.controldir.root_transport.clone(sub_path)
978
if tree_transport.base != branch_transport.base:
979
tree_bzrdir = format.initialize_on_transport(tree_transport)
980
tree_bzrdir.set_branch_reference(new_branch)
982
tree_bzrdir = branch_bzrdir
983
wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
984
wt.set_parent_ids(self.get_parent_ids())
985
# FIXME: Support nested trees
986
my_inv = self.root_inventory
987
child_inv = inventory.Inventory(root_id=None)
988
file_id = self.path2id(sub_path)
989
new_root = my_inv.get_entry(file_id)
990
my_inv.remove_recursive_id(file_id)
991
new_root.parent_id = None
992
child_inv.add(new_root)
993
self._write_inventory(my_inv)
994
wt._write_inventory(child_inv)
997
def list_files(self, include_root=False, from_dir=None, recursive=True):
998
"""List all files as (path, class, kind, id, entry).
1000
Lists, but does not descend into unversioned directories.
1001
This does not include files that have been deleted in this
1002
tree. Skips the control directory.
1004
:param include_root: if True, return an entry for the root
1005
:param from_dir: start from this directory or None for the root
1006
:param recursive: whether to recurse into subdirectories or not
1008
with self.lock_read():
1009
if from_dir is None and include_root is True:
1010
yield ('', 'V', 'directory', self.root_inventory.root)
1011
# Convert these into local objects to save lookup times
1012
pathjoin = osutils.pathjoin
1014
# transport.base ends in a slash, we want the piece
1015
# between the last two slashes
1016
transport_base_dir = self.controldir.transport.base.rsplit(
1020
'directory': TreeDirectory,
1025
# directory file_id, relative path, absolute path, reverse sorted
1027
if from_dir is not None:
1028
inv, from_dir_id = self._path2inv_file_id(from_dir)
1029
if from_dir_id is None:
1030
# Directory not versioned
1032
from_dir_abspath = pathjoin(self.basedir, from_dir)
1034
inv = self.root_inventory
1035
from_dir_id = inv.root.file_id
1036
from_dir_abspath = self.basedir
1037
children = sorted(os.listdir(from_dir_abspath))
1038
# jam 20060527 The kernel sized tree seems equivalent whether we
1039
# use a deque and popleft to keep them sorted, or if we use a plain
1040
# list and just reverse() them.
1041
children = collections.deque(children)
1042
stack = [(from_dir_id, u'', from_dir_abspath, children)]
1044
(from_dir_id, from_dir_relpath, from_dir_abspath,
1045
children) = stack[-1]
1048
f = children.popleft()
1049
# TODO: If we find a subdirectory with its own .bzr
1050
# directory, then that is a separate tree and we
1051
# should exclude it.
1053
# the bzrdir for this tree
1054
if transport_base_dir == f:
1057
# we know that from_dir_relpath and from_dir_abspath never
1058
# end in a slash and 'f' doesn't begin with one, we can do
1059
# a string op, rather than the checks of pathjoin(), all
1060
# relative paths will have an extra slash at the beginning
1061
fp = from_dir_relpath + '/' + f
1064
fap = from_dir_abspath + '/' + f
1066
dir_ie = inv.get_entry(from_dir_id)
1067
if dir_ie.kind == 'directory':
1068
f_ie = dir_ie.children.get(f)
1073
elif self.is_ignored(fp[1:]):
1076
# we may not have found this file, because of a unicode
1077
# issue, or because the directory was actually a
1079
f_norm, can_access = osutils.normalized_filename(f)
1080
if f == f_norm or not can_access:
1081
# No change, so treat this file normally
1084
# this file can be accessed by a normalized path
1085
# check again if it is versioned
1086
# these lines are repeated here for performance
1088
fp = from_dir_relpath + '/' + f
1089
fap = from_dir_abspath + '/' + f
1090
f_ie = inv.get_child(from_dir_id, f)
1093
elif self.is_ignored(fp[1:]):
1098
fk = osutils.file_kind(fap)
1100
# make a last minute entry
1102
yield fp[1:], c, fk, f_ie
1105
yield fp[1:], c, fk, fk_entries[fk]()
1107
yield fp[1:], c, fk, TreeEntry()
1110
if fk != 'directory':
1113
# But do this child first if recursing down
1115
new_children = sorted(os.listdir(fap))
1116
new_children = collections.deque(new_children)
1117
stack.append((f_ie.file_id, fp, fap, new_children))
1118
# Break out of inner loop,
1119
# so that we start outer loop with child
1122
# if we finished all children, pop it off the stack
1125
def move(self, from_paths, to_dir=None, after=False):
1128
to_dir must exist in the inventory.
1130
If to_dir exists and is a directory, the files are moved into
1131
it, keeping their old names.
1133
Note that to_dir is only the last component of the new name;
1134
this doesn't change the directory.
1136
For each entry in from_paths the move mode will be determined
1139
The first mode moves the file in the filesystem and updates the
1140
inventory. The second mode only updates the inventory without
1141
touching the file on the filesystem.
1143
move uses the second mode if 'after == True' and the target is
1144
either not versioned or newly added, and present in the working tree.
1146
move uses the second mode if 'after == False' and the source is
1147
versioned but no longer in the working tree, and the target is not
1148
versioned but present in the working tree.
1150
move uses the first mode if 'after == False' and the source is
1151
versioned and present in the working tree, and the target is not
1152
versioned and not present in the working tree.
1154
Everything else results in an error.
1156
This returns a list of (from_path, to_path) pairs for each
1157
entry that is moved.
1162
# check for deprecated use of signature
1164
raise TypeError('You must supply a target directory')
1165
# check destination directory
1166
if isinstance(from_paths, (str, text_type)):
1168
with self.lock_tree_write():
1169
to_abs = self.abspath(to_dir)
1170
if not osutils.isdir(to_abs):
1171
raise errors.BzrMoveFailedError(
1172
'', to_dir, errors.NotADirectory(to_abs))
1173
if not self.has_filename(to_dir):
1174
raise errors.BzrMoveFailedError(
1175
'', to_dir, errors.NotInWorkingDirectory(to_dir))
1176
to_inv, to_dir_id = self._path2inv_file_id(to_dir)
1177
if to_dir_id is None:
1178
raise errors.BzrMoveFailedError(
1179
'', to_dir, errors.NotVersionedError(path=to_dir))
1181
to_dir_ie = to_inv.get_entry(to_dir_id)
1182
if to_dir_ie.kind != 'directory':
1183
raise errors.BzrMoveFailedError(
1184
'', to_dir, errors.NotADirectory(to_abs))
1186
# create rename entries and tuples
1187
for from_rel in from_paths:
1188
from_tail = osutils.splitpath(from_rel)[-1]
1189
from_inv, from_id = self._path2inv_file_id(from_rel)
1191
raise errors.BzrMoveFailedError(from_rel, to_dir,
1192
errors.NotVersionedError(path=from_rel))
1194
from_entry = from_inv.get_entry(from_id)
1195
from_parent_id = from_entry.parent_id
1196
to_rel = osutils.pathjoin(to_dir, from_tail)
1197
rename_entry = InventoryWorkingTree._RenameEntry(
1200
from_tail=from_tail,
1201
from_parent_id=from_parent_id,
1202
to_rel=to_rel, to_tail=from_tail,
1203
to_parent_id=to_dir_id)
1204
rename_entries.append(rename_entry)
1205
rename_tuples.append((from_rel, to_rel))
1207
# determine which move mode to use. checks also for movability
1208
rename_entries = self._determine_mv_mode(rename_entries, after)
1210
original_modified = self._inventory_is_modified
1213
self._inventory_is_modified = True
1214
self._move(rename_entries)
1215
except BaseException:
1216
# restore the inventory on error
1217
self._inventory_is_modified = original_modified
1219
# FIXME: Should potentially also write the from_invs
1220
self._write_inventory(to_inv)
1221
return rename_tuples
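# Usage sketch for move() (paths are illustrative): move versioned files into
# an already-versioned directory, or record a move that has already happened
# on disk by passing after=True.
#
#     tree.move(['a.txt', 'b.txt'], 'subdir')
#     tree.move(['c.txt'], 'subdir', after=True)   # c.txt already moved on disk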
1223
def rename_one(self, from_rel, to_rel, after=False):
1226
This can change the directory or the filename or both.
1228
rename_one has several 'modes' to work. First, it can rename a physical
1229
file and change the file_id. That is the normal mode. Second, it can
1230
only change the file_id without touching any physical file.
1232
rename_one uses the second mode if 'after == True' and 'to_rel' is not
1233
versioned but present in the working tree.
1235
rename_one uses the second mode if 'after == False' and 'from_rel' is
1236
versioned but no longer in the working tree, and 'to_rel' is not
1237
versioned but present in the working tree.
1239
rename_one uses the first mode if 'after == False' and 'from_rel' is
1240
versioned and present in the working tree, and 'to_rel' is not
1241
versioned and not present in the working tree.
1243
Everything else results in an error.
1245
with self.lock_tree_write():
1248
# create rename entries and tuples
1249
from_tail = osutils.splitpath(from_rel)[-1]
1250
from_inv, from_id = self._path2inv_file_id(from_rel)
1252
# if file is missing in the inventory maybe it's in the
1254
basis_tree = self.branch.basis_tree()
1255
from_id = basis_tree.path2id(from_rel)
1257
raise errors.BzrRenameFailedError(
1259
errors.NotVersionedError(path=from_rel))
1260
# put entry back in the inventory so we can rename it
1261
from_entry = basis_tree.root_inventory.get_entry(
1263
from_inv.add(from_entry)
1265
from_inv, from_inv_id = self._unpack_file_id(from_id)
1266
from_entry = from_inv.get_entry(from_inv_id)
1267
from_parent_id = from_entry.parent_id
1268
to_dir, to_tail = os.path.split(to_rel)
1269
to_inv, to_dir_id = self._path2inv_file_id(to_dir)
1270
rename_entry = InventoryWorkingTree._RenameEntry(
1273
from_tail=from_tail,
1274
from_parent_id=from_parent_id,
1275
to_rel=to_rel, to_tail=to_tail,
1276
to_parent_id=to_dir_id)
1277
rename_entries.append(rename_entry)
1279
# determine which move mode to use. checks also for movability
1280
rename_entries = self._determine_mv_mode(rename_entries, after)
1282
# check if the target changed directory and if the target directory
1284
if to_dir_id is None:
1285
raise errors.BzrMoveFailedError(
1286
from_rel, to_rel, errors.NotVersionedError(path=to_dir))
1288
# all checks done. now we can continue with our actual work
1289
mutter('rename_one:\n'
1294
' to_dir_id {%s}\n',
1295
from_id, from_rel, to_rel, to_dir, to_dir_id)
1297
self._move(rename_entries)
1298
self._write_inventory(to_inv)
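# Usage sketch for rename_one() (illustrative paths): rename within the tree,
# or record a rename already performed on disk with after=True.
#
#     tree.rename_one('old.txt', 'docs/new.txt')
#     tree.rename_one('moved.txt', 'docs/moved.txt', after=True)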
1300
class _RenameEntry(object):
1301
def __init__(self, from_rel, from_id, from_tail, from_parent_id,
1302
to_rel, to_tail, to_parent_id, only_change_inv=False,
1304
self.from_rel = from_rel
1305
self.from_id = from_id
1306
self.from_tail = from_tail
1307
self.from_parent_id = from_parent_id
1308
self.to_rel = to_rel
1309
self.to_tail = to_tail
1310
self.to_parent_id = to_parent_id
1311
self.change_id = change_id
1312
self.only_change_inv = only_change_inv
1314
def _determine_mv_mode(self, rename_entries, after=False):
1315
"""Determines for each from-to pair if both inventory and working tree
1316
or only the inventory has to be changed.
1318
Also does basic plausibility tests.
1320
# FIXME: Handling of nested trees
1321
inv = self.root_inventory
1323
for rename_entry in rename_entries:
1324
# store to local variables for easier reference
1325
from_rel = rename_entry.from_rel
1326
from_id = rename_entry.from_id
1327
to_rel = rename_entry.to_rel
1328
to_id = inv.path2id(to_rel)
1329
only_change_inv = False
1331
# check the inventory for source and destination
1333
raise errors.BzrMoveFailedError(
1334
from_rel, to_rel, errors.NotVersionedError(path=from_rel))
1335
if to_id is not None:
1337
# allow it with --after but only if dest is newly added
1339
basis = self.basis_tree()
1340
with basis.lock_read():
1341
if not basis.has_id(to_id):
1342
rename_entry.change_id = True
1345
raise errors.BzrMoveFailedError(
1347
errors.AlreadyVersionedError(path=to_rel))
1349
# try to determine the mode for rename (only change inv or change
1350
# inv and file system)
1352
if not self.has_filename(to_rel):
1353
raise errors.BzrMoveFailedError(
1357
extra="New file has not been created yet"))
1358
only_change_inv = True
1359
elif not self.has_filename(from_rel) and self.has_filename(to_rel):
1360
only_change_inv = True
1361
elif self.has_filename(from_rel) and not self.has_filename(to_rel):
1362
only_change_inv = False
1363
elif (not self.case_sensitive and
1364
from_rel.lower() == to_rel.lower() and
1365
self.has_filename(from_rel)):
1366
only_change_inv = False
1368
# something is wrong, so lets determine what exactly
1369
if not self.has_filename(from_rel) and \
1370
not self.has_filename(to_rel):
1371
raise errors.BzrRenameFailedError(
1373
errors.PathsDoNotExist(paths=(from_rel, to_rel)))
1375
raise errors.RenameFailedFilesExist(from_rel, to_rel)
1376
rename_entry.only_change_inv = only_change_inv
1377
return rename_entries
1379
def _move(self, rename_entries):
1380
"""Moves a list of files.
1382
Depending on the value of the flag 'only_change_inv', the
1383
file will be moved on the file system or not.
1387
for entry in rename_entries:
1389
self._move_entry(entry)
1390
except BaseException:
1391
self._rollback_move(moved)
1395
def _rollback_move(self, moved):
1396
"""Try to rollback a previous move in case of an filesystem error."""
1399
self._move_entry(WorkingTree._RenameEntry(
1400
entry.to_rel, entry.from_id,
1401
entry.to_tail, entry.to_parent_id, entry.from_rel,
1402
entry.from_tail, entry.from_parent_id,
1403
entry.only_change_inv))
1404
except errors.BzrMoveFailedError as e:
1405
raise errors.BzrMoveFailedError(
1406
'', '', "Rollback failed."
1407
" The working tree is in an inconsistent state."
1408
" Please consider doing a 'bzr revert'."
1409
" Error message is: %s" % e)
1411
def _move_entry(self, entry):
1412
inv = self.root_inventory
1413
from_rel_abs = self.abspath(entry.from_rel)
1414
to_rel_abs = self.abspath(entry.to_rel)
1415
if from_rel_abs == to_rel_abs:
1416
raise errors.BzrMoveFailedError(entry.from_rel, entry.to_rel,
1417
"Source and target are identical.")
1419
if not entry.only_change_inv:
1421
osutils.rename(from_rel_abs, to_rel_abs)
1422
except OSError as e:
1423
raise errors.BzrMoveFailedError(
1424
entry.from_rel, entry.to_rel, e[1])
1426
to_id = inv.path2id(entry.to_rel)
1427
inv.remove_recursive_id(to_id)
1428
inv.rename(entry.from_id, entry.to_parent_id, entry.to_tail)
1430
def unversion(self, paths):
1431
"""Remove the paths in paths from the current versioned set.
1433
When a path is unversioned, all of its children are automatically
1436
:param paths: The paths to stop versioning.
1437
:raises NoSuchFile: if any path is not currently versioned.
1439
with self.lock_tree_write():
1442
file_id = self._inventory.path2id(path)
1444
raise errors.NoSuchFile(path, self)
1445
file_ids.add(file_id)
1446
for file_id in file_ids:
1447
if self._inventory.has_id(file_id):
1448
self._inventory.remove_recursive_id(file_id)
1450
# in the future this should just set a dirty bit to wait for
1451
# the final unlock. However, until all methods of workingtree
1452
# start with the current in-memory inventory rather than
1453
# triggering a read, it is more complex - we need to teach
1454
# read_inventory to know when to read, and when to not read
1455
# first... and possibly to save first when the in memory one
1456
# may be corrupted. so for now, we just only write it if it is
1457
# indeed dirty. - RBC 20060907
1458
self._write_inventory(self._inventory)
1460
def stored_kind(self, path):
1461
"""See Tree.stored_kind"""
1462
return self._path2ie(path).kind
1465
"""Yield all unversioned files in this WorkingTree.
1467
If there are any unversioned directories then only the directory is
1468
returned, not all its children. But if there are unversioned files
1469
under a versioned subdirectory, they are returned.
1471
Currently returned depth-first, sorted by name within directories.
1472
This is the same order used by 'osutils.walkdirs'.
1474
# TODO: Work from given directory downwards
1475
for path, dir_entry in self.iter_entries_by_dir():
1476
if dir_entry.kind != 'directory':
1478
# mutter("search for unknowns in %r", path)
1479
dirabs = self.abspath(path)
1480
if not osutils.isdir(dirabs):
1481
# e.g. directory deleted
1485
for subf in os.listdir(dirabs.encode(osutils._fs_enc)):
1487
subf = subf.decode(osutils._fs_enc)
1488
except UnicodeDecodeError:
1489
path_os_enc = path.encode(osutils._fs_enc)
1490
relpath = path_os_enc + b'/' + subf
1491
raise errors.BadFilenameEncoding(relpath,
1494
if self.controldir.is_control_filename(subf):
1496
if subf not in dir_entry.children:
1499
can_access) = osutils.normalized_filename(subf)
1500
except UnicodeDecodeError:
1501
path_os_enc = path.encode(osutils._fs_enc)
1502
relpath = path_os_enc + '/' + subf
1503
raise errors.BadFilenameEncoding(relpath,
1505
if subf_norm != subf and can_access:
1506
if subf_norm not in dir_entry.children:
1507
fl.append(subf_norm)
1513
subp = osutils.pathjoin(path, subf)
1516
def walkdirs(self, prefix=""):
1517
"""Walk the directories of this tree.
1519
returns a generator which yields items in the form:
1520
((current_directory_path, fileid),
1521
[(file1_path, file1_name, file1_kind, (lstat), file1_id,
1524
This API returns a generator, which is only valid during the current
1525
tree transaction - within a single lock_read or lock_write duration.
1527
If the tree is not locked, it may cause an error to be raised,
1528
depending on the tree implementation.
1530
disk_top = self.abspath(prefix)
1531
if disk_top.endswith('/'):
1532
disk_top = disk_top[:-1]
1533
top_strip_len = len(disk_top) + 1
1534
inventory_iterator = self._walkdirs(prefix)
1535
disk_iterator = osutils.walkdirs(disk_top, prefix)
1537
current_disk = next(disk_iterator)
1538
disk_finished = False
1539
except OSError as e:
1540
if not (e.errno == errno.ENOENT
1541
or (sys.platform == 'win32' and e.errno == ERROR_PATH_NOT_FOUND)):
1544
disk_finished = True
1546
current_inv = next(inventory_iterator)
1547
inv_finished = False
1548
except StopIteration:
1551
while not inv_finished or not disk_finished:
1553
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
1554
cur_disk_dir_content) = current_disk
1556
((cur_disk_dir_relpath, cur_disk_dir_path_from_top),
1557
cur_disk_dir_content) = ((None, None), None)
1558
if not disk_finished:
1559
# strip out .bzr dirs
1560
if (cur_disk_dir_path_from_top[top_strip_len:] == ''
1561
and len(cur_disk_dir_content) > 0):
1562
# osutils.walkdirs can be made nicer -
1563
# yield the path-from-prefix rather than the pathjoined
1565
bzrdir_loc = bisect_left(cur_disk_dir_content,
1567
if (bzrdir_loc < len(cur_disk_dir_content) and
1568
self.controldir.is_control_filename(
1569
cur_disk_dir_content[bzrdir_loc][0])):
1570
# we don't yield the contents of .bzr, or .bzr itself.
1571
del cur_disk_dir_content[bzrdir_loc]
1573
# everything is unknown
1576
# everything is missing
1579
direction = ((current_inv[0][0] > cur_disk_dir_relpath)
1580
- (current_inv[0][0] < cur_disk_dir_relpath))
1583
# disk is before inventory - unknown
1584
dirblock = [(relpath, basename, kind, stat, None, None) for
1585
relpath, basename, kind, stat, top_path in
1586
cur_disk_dir_content]
1587
yield (cur_disk_dir_relpath, None), dirblock
1589
current_disk = next(disk_iterator)
1590
except StopIteration:
1591
disk_finished = True
1593
# inventory is before disk - missing.
1594
dirblock = [(relpath, basename, 'unknown', None, fileid, kind)
1595
for relpath, basename, dkind, stat, fileid, kind in
1597
yield (current_inv[0][0], current_inv[0][1]), dirblock
1599
current_inv = next(inventory_iterator)
1600
except StopIteration:
1603
# versioned present directory
1604
# merge the inventory and disk data together
1606
for relpath, subiterator in itertools.groupby(sorted(
1607
current_inv[1] + cur_disk_dir_content,
1608
key=operator.itemgetter(0)), operator.itemgetter(1)):
1609
path_elements = list(subiterator)
1610
if len(path_elements) == 2:
1611
inv_row, disk_row = path_elements
1612
# versioned, present file
1613
dirblock.append((inv_row[0],
1614
inv_row[1], disk_row[2],
1615
disk_row[3], inv_row[4],
1617
elif len(path_elements[0]) == 5:
1620
(path_elements[0][0], path_elements[0][1],
1621
path_elements[0][2], path_elements[0][3], None,
1623
elif len(path_elements[0]) == 6:
1624
# versioned, absent file.
1626
(path_elements[0][0], path_elements[0][1],
1627
'unknown', None, path_elements[0][4],
1628
path_elements[0][5]))
1630
raise NotImplementedError('unreachable code')
1631
yield current_inv[0], dirblock
1633
current_inv = next(inventory_iterator)
1634
except StopIteration:
1637
current_disk = next(disk_iterator)
1638
except StopIteration:
1639
disk_finished = True
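# Illustrative consumer of walkdirs() above (an assumption about typical use):
# the generator interleaves versioned and on-disk entries, so callers normally
# hold a lock for the whole iteration.
#
#     with tree.lock_read():
#         for (dirpath, dir_file_id), entries in tree.walkdirs():
#             for relpath, basename, kind, lstat, file_id, versioned_kind in entries:
#                 print(relpath, kind, file_id)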
1641
def _walkdirs(self, prefix=""):
1642
"""Walk the directories of this tree.
1644
:param prefix: is used as the directory to start with.
1645
:returns: a generator which yields items in the form::
1647
((current_directory_path, fileid),
1648
[(file1_path, file1_name, file1_kind, None, file1_id,
1651
_directory = 'directory'
1652
# get the root in the inventory
1653
inv, top_id = self._path2inv_file_id(prefix)
1657
pending = [(prefix, '', _directory, None, top_id, None)]
1660
currentdir = pending.pop()
1661
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-id, 5-kind
1662
top_id = currentdir[4]
1664
relroot = currentdir[0] + '/'
1667
# FIXME: stash the node in pending
1668
entry = inv.get_entry(top_id)
1669
if entry.kind == 'directory':
1670
for name, child in entry.sorted_children():
1671
dirblock.append((relroot + name, name, child.kind, None,
1672
child.file_id, child.kind
1674
yield (currentdir[0], entry.file_id), dirblock
1675
# push the user specified dirs from dirblock
1676
for dir in reversed(dirblock):
1677
if dir[2] == _directory:
1680
def update_feature_flags(self, updated_flags):
1681
"""Update the feature flags for this branch.
1683
:param updated_flags: Dictionary mapping feature names to necessities
1684
A necessity can be None to indicate the feature should be removed
1686
with self.lock_write():
1687
self._format._update_feature_flags(updated_flags)
1688
self.control_transport.put_bytes(
1689
'format', self._format.as_string())
1691
def _check_for_tree_references(self, iterator):
1692
"""See if directories have become tree-references."""
1693
blocked_parent_ids = set()
1694
for path, ie in iterator:
1695
if ie.parent_id in blocked_parent_ids:
1696
# This entry was pruned because one of its parents became a
1697
# TreeReference. If this is a directory, mark it as blocked.
1698
if ie.kind == 'directory':
1699
blocked_parent_ids.add(ie.file_id)
1701
if (ie.kind == 'directory' and
1702
self._directory_is_tree_reference(path)):
1703
# This InventoryDirectory needs to be a TreeReference
1704
ie = inventory.TreeReference(ie.file_id, ie.name, ie.parent_id)
1705
blocked_parent_ids.add(ie.file_id)
1708
def iter_entries_by_dir(self, specific_files=None):
1709
"""See Tree.iter_entries_by_dir()"""
1710
# The only trick here is that if we supports_tree_reference then we
1711
# need to detect if a directory becomes a tree-reference.
1712
iterator = super(WorkingTree, self).iter_entries_by_dir(
1713
specific_files=specific_files)
1714
if not self.supports_tree_reference():
1717
return self._check_for_tree_references(iterator)
1719
def get_canonical_paths(self, paths):
1720
"""Look up canonical paths for multiple items.
1722
:param paths: A sequence of paths relative to the root of the tree.
1723
:return: An iterator over paths, with each item the corresponding input
1724
path adjusted to account for existing elements that match case
1727
with self.lock_read():
1728
if not self.case_sensitive:
1731
elif sys.platform == 'darwin':
1735
return unicodedata.normalize('NFC', x)
1739
if normalize is None or self.is_versioned(path):
1742
yield get_canonical_path(self, path, normalize)
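# Illustrative behaviour of get_canonical_paths() on a case-insensitive
# filesystem (hypothetical file names): each input path is adjusted to the
# casing of elements that already exist in the tree.
#
#     list(tree.get_canonical_paths(['readme.TXT', 'SRC/main.py']))
#     # -> ['README.txt', 'src/main.py']  (if those are the existing names)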
1745
class WorkingTreeFormatMetaDir(bzrdir.BzrFormat, WorkingTreeFormat):
1746
"""Base class for working trees that live in bzr meta directories."""
1748
ignore_filename = '.bzrignore'
1751
WorkingTreeFormat.__init__(self)
1752
bzrdir.BzrFormat.__init__(self)
1755
def find_format_string(klass, controldir):
1756
"""Return format name for the working tree object in controldir."""
1758
transport = controldir.get_workingtree_transport(None)
1759
return transport.get_bytes("format")
1760
except errors.NoSuchFile:
1761
raise errors.NoWorkingTree(base=transport.base)
1764
def find_format(klass, controldir):
1765
"""Return the format for the working tree object in controldir."""
1766
format_string = klass.find_format_string(controldir)
1767
return klass._find_format(format_registry, 'working tree',
1770
def check_support_status(self, allow_unsupported, recommend_upgrade=True,
1772
WorkingTreeFormat.check_support_status(
1773
self, allow_unsupported=allow_unsupported,
1774
recommend_upgrade=recommend_upgrade, basedir=basedir)
1775
bzrdir.BzrFormat.check_support_status(
1776
self, allow_unsupported=allow_unsupported,
1777
recommend_upgrade=recommend_upgrade, basedir=basedir)