@@ -368 +421 @@
         state = self.current_dirstate()
         if stat_value is None:
-                stat_value = os.lstat(file_abspath)
+                stat_value = osutils.lstat(file_abspath)
                 if e.errno == errno.ENOENT:
         link_or_sha1 = dirstate.update_entry(state, entry, file_abspath,
-            stat_value=stat_value)
-        if entry[1][0][0] == 'f':
+                                             stat_value=stat_value)
+        if entry[1][0][0] == b'f':
             if link_or_sha1 is None:
-                file_obj, statvalue = self.get_file_with_stat(file_id, path)
+                file_obj, statvalue = self.get_file_with_stat(path)
                     sha1 = osutils.sha_file(file_obj)
-                self._observed_sha1(file_id, path, (sha1, statvalue))
+                self._observed_sha1(path, (sha1, statvalue))
                 return link_or_sha1
-    def _get_inventory(self):
+    def _get_root_inventory(self):
         """Get the inventory for the tree. This is only valid within a lock."""
         if 'evil' in debug.debug_flags:
-            trace.mutter_callsite(2,
-                "accessing .inventory forces a size of tree translation.")
+            trace.mutter_callsite(
+                2, "accessing .inventory forces a size of tree translation.")
         if self._inventory is not None:
             return self._inventory
         self._must_be_locked()
         self._generate_inventory()
         return self._inventory

-    inventory = property(_get_inventory,
-                         doc="Inventory of this Tree")
+    root_inventory = property(_get_root_inventory,
+                              "Root inventory of this tree")
     def get_parent_ids(self):
         """See Tree.get_parent_ids.

         This implementation requests the ids list from the dirstate file.
         """
-        return self.current_dirstate().get_parent_ids()
+        with self.lock_read():
+            return self.current_dirstate().get_parent_ids()

-    def get_reference_revision(self, file_id, path=None):
+    def get_reference_revision(self, path):
         # referenced tree's revision is whatever's currently there
-        return self.get_nested_tree(file_id, path).last_revision()
+        return self.get_nested_tree(path).last_revision()

-    def get_nested_tree(self, file_id, path=None):
-            path = self.id2path(file_id)
-        # else: check file_id is at path?
+    def get_nested_tree(self, path):
         return WorkingTree.open(self.abspath(path))
-    def get_root_id(self):
-        """Return the id of this trees root"""
-        return self._get_entry(path='')[0][2]
-
-    def has_id(self, file_id):
-        state = self.current_dirstate()
-        row, parents = self._get_entry(file_id=file_id)
-        return osutils.lexists(pathjoin(
-            self.basedir, row[0].decode('utf8'), row[1].decode('utf8')))
-
-    def has_or_had_id(self, file_id):
-        state = self.current_dirstate()
-        row, parents = self._get_entry(file_id=file_id)
-        return row is not None

     def id2path(self, file_id):
         "Convert a file-id to a path."
-        state = self.current_dirstate()
-        entry = self._get_entry(file_id=file_id)
-        if entry == (None, None):
-            raise errors.NoSuchId(tree=self, file_id=file_id)
-        path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
-        return path_utf8.decode('utf8')
+        with self.lock_read():
+            state = self.current_dirstate()
+            entry = self._get_entry(file_id=file_id)
+            if entry == (None, None):
+                raise errors.NoSuchId(tree=self, file_id=file_id)
+            path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
+            return path_utf8.decode('utf8')
     def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
         entry = self._get_entry(path=path)
         if entry == (None, None):
-            return False # Missing entries are not executable
-        return entry[1][0][3] # Executable?
-
-    if not osutils.supports_executable():
-        def is_executable(self, file_id, path=None):
-            """Test if a file is executable or not.
-
-            Note: The caller is expected to take a read-lock before calling this.
-            """
-            entry = self._get_entry(file_id=file_id, path=path)
+            return False  # Missing entries are not executable
+        return entry[1][0][3]  # Executable?
+
+    def is_executable(self, path):
+        """Test if a file is executable or not.
+
+        Note: The caller is expected to take a read-lock before calling this.
+        """
+        if not self._supports_executable():
+            entry = self._get_entry(path=path)
             if entry == (None, None):
             return entry[1][0][3]
-
-        _is_executable_from_path_and_stat = \
-            _is_executable_from_path_and_stat_from_basis
-
-        def is_executable(self, file_id, path=None):
-            """Test if a file is executable or not.
-
-            Note: The caller is expected to take a read-lock before calling this.
-            """
             self._must_be_locked()
-            path = self.id2path(file_id)
-            mode = os.lstat(self.abspath(path)).st_mode
+            mode = osutils.lstat(self.abspath(path)).st_mode
             return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
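For readers unfamiliar with the mode test used in the branch above, here is a small standalone sketch of the same S_ISREG/S_IEXEC check against an arbitrary filesystem path. It is plain standard-library Python for illustration only, not part of this diff:

    import os
    import stat

    def is_executable_file(abspath):
        # Same test as the tree method: a regular file with an execute bit set.
        mode = os.lstat(abspath).st_mode
        return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)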
     def all_file_ids(self):

@@ -600 +618 @@
                 self._repo_supports_tree_reference = getattr(
                     self.branch.repository._format, "supports_tree_reference",
+            except BaseException:
                 self._control_files.unlock()
+        except BaseException:
             self.branch.unlock()
+        return LogicalLockResult(self.unlock)

     def lock_tree_write(self):
-        """See MutableTree.lock_tree_write, and WorkingTree.unlock."""
+        """See MutableTree.lock_tree_write, and WorkingTree.unlock.
+
+        :return: A breezy.lock.LogicalLockResult.
+        """
         self.branch.lock_read()
-        self._lock_self_write()
+        return self._lock_self_write()

     def lock_write(self):
-        """See MutableTree.lock_write, and WorkingTree.unlock."""
+        """See MutableTree.lock_write, and WorkingTree.unlock.
+
+        :return: A breezy.lock.LogicalLockResult.
+        """
         self.branch.lock_write()
-        self._lock_self_write()
+        return self._lock_self_write()
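The updated docstrings advertise that the lock methods now return a breezy.lock.LogicalLockResult. A minimal sketch of how a caller might rely on that return value; the `tree` and `callback` names here are illustrative assumptions, not part of the change:

    def with_write_lock(tree, callback):
        # lock_write() returns an object whose .unlock() releases the lock,
        # so the caller does not need to keep its own reference to tree.unlock.
        lock = tree.lock_write()
        try:
            return callback(tree)
        finally:
            lock.unlock()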
-    @needs_tree_write_lock
     def move(self, from_paths, to_dir, after=False):
         """See WorkingTree.move()."""
         if not from_paths:
-        state = self.current_dirstate()
-        if isinstance(from_paths, basestring):
-        to_dir_utf8 = to_dir.encode('utf8')
-        to_entry_dirname, to_basename = os.path.split(to_dir_utf8)
-        id_index = state._get_id_index()
-        # check destination directory
-        # get the details for it
-        to_entry_block_index, to_entry_entry_index, dir_present, entry_present = \
-            state._get_block_entry_index(to_entry_dirname, to_basename, 0)
-        if not entry_present:
-            raise errors.BzrMoveFailedError('', to_dir,
-                errors.NotVersionedError(to_dir))
-        to_entry = state._dirblocks[to_entry_block_index][1][to_entry_entry_index]
-        # get a handle on the block itself.
-        to_block_index = state._ensure_block(
-            to_entry_block_index, to_entry_entry_index, to_dir_utf8)
-        to_block = state._dirblocks[to_block_index]
-        to_abs = self.abspath(to_dir)
-        if not isdir(to_abs):
-            raise errors.BzrMoveFailedError('',to_dir,
-                errors.NotADirectory(to_abs))
-        if to_entry[1][0][0] != 'd':
-            raise errors.BzrMoveFailedError('',to_dir,
-                errors.NotADirectory(to_abs))
-        if self._inventory is not None:
-            update_inventory = True
-            to_dir_id = to_entry[0][2]
-            to_dir_ie = inv[to_dir_id]
-            update_inventory = False
-        def move_one(old_entry, from_path_utf8, minikind, executable,
-                     fingerprint, packed_stat, size,
-                     to_block, to_key, to_path_utf8):
-            state._make_absent(old_entry)
-            from_key = old_entry[0]
-                lambda:state.update_minimal(from_key,
-                    executable=executable,
-                    fingerprint=fingerprint,
-                    packed_stat=packed_stat,
-                    path_utf8=from_path_utf8))
-            state.update_minimal(to_key,
-                    executable=executable,
-                    fingerprint=fingerprint,
-                    packed_stat=packed_stat,
-                    path_utf8=to_path_utf8)
-            added_entry_index, _ = state._find_entry_index(to_key, to_block[1])
-            new_entry = to_block[1][added_entry_index]
-            rollbacks.append(lambda:state._make_absent(new_entry))
-        for from_rel in from_paths:
-            # from_rel is 'pathinroot/foo/bar'
-            from_rel_utf8 = from_rel.encode('utf8')
-            from_dirname, from_tail = osutils.split(from_rel)
-            from_dirname, from_tail_utf8 = osutils.split(from_rel_utf8)
-            from_entry = self._get_entry(path=from_rel)
-            if from_entry == (None, None):
-                raise errors.BzrMoveFailedError(from_rel,to_dir,
-                    errors.NotVersionedError(path=from_rel))
-            from_id = from_entry[0][2]
-            to_rel = pathjoin(to_dir, from_tail)
-            to_rel_utf8 = pathjoin(to_dir_utf8, from_tail_utf8)
-            item_to_entry = self._get_entry(path=to_rel)
-            if item_to_entry != (None, None):
-                raise errors.BzrMoveFailedError(from_rel, to_rel,
-                    "Target is already versioned.")
-            if from_rel == to_rel:
-                raise errors.BzrMoveFailedError(from_rel, to_rel,
-                    "Source and target are identical.")
-            from_missing = not self.has_filename(from_rel)
-            to_missing = not self.has_filename(to_rel)
-                    raise errors.BzrMoveFailedError(from_rel, to_rel,
-                        errors.NoSuchFile(path=to_rel,
-                            extra="New file has not been created yet"))
-                    # neither path exists
-                    raise errors.BzrRenameFailedError(from_rel, to_rel,
-                        errors.PathsDoNotExist(paths=(from_rel, to_rel)))
-                if from_missing: # implicitly just update our path mapping
+        with self.lock_tree_write():
+            state = self.current_dirstate()
+            if isinstance(from_paths, (str, bytes)):
+            to_dir_utf8 = to_dir.encode('utf8')
+            to_entry_dirname, to_basename = os.path.split(to_dir_utf8)
+            # check destination directory
+            # get the details for it
+            (to_entry_block_index, to_entry_entry_index, dir_present,
+             entry_present) = state._get_block_entry_index(
+                 to_entry_dirname, to_basename, 0)
+            if not entry_present:
+                raise errors.BzrMoveFailedError(
+                    '', to_dir, errors.NotVersionedError(to_dir))
+            to_entry = state._dirblocks[to_entry_block_index][1][to_entry_entry_index]
+            # get a handle on the block itself.
+            to_block_index = state._ensure_block(
+                to_entry_block_index, to_entry_entry_index, to_dir_utf8)
+            to_block = state._dirblocks[to_block_index]
+            to_abs = self.abspath(to_dir)
+            if not isdir(to_abs):
+                raise errors.BzrMoveFailedError('', to_dir,
+                                                errors.NotADirectory(to_abs))
+            if to_entry[1][0][0] != b'd':
+                raise errors.BzrMoveFailedError('', to_dir,
+                                                errors.NotADirectory(to_abs))
+            if self._inventory is not None:
+                update_inventory = True
+                inv = self.root_inventory
+                to_dir_id = to_entry[0][2]
+                update_inventory = False
+            # GZ 2017-03-28: The rollbacks variable was shadowed in the loop below
+            # missing those added here, but there's also no test coverage for this.
+            rollbacks = cleanup.ExitStack()
+            def move_one(old_entry, from_path_utf8, minikind, executable,
+                         fingerprint, packed_stat, size,
+                         to_block, to_key, to_path_utf8):
+                state._make_absent(old_entry)
+                from_key = old_entry[0]
+                    state.update_minimal,
+                    executable=executable,
+                    fingerprint=fingerprint,
+                    packed_stat=packed_stat,
+                    path_utf8=from_path_utf8)
+                state.update_minimal(to_key,
+                                     executable=executable,
+                                     fingerprint=fingerprint,
+                                     packed_stat=packed_stat,
+                                     path_utf8=to_path_utf8)
+                added_entry_index, _ = state._find_entry_index(
+                new_entry = to_block[1][added_entry_index]
+                rollbacks.callback(state._make_absent, new_entry)
+            for from_rel in from_paths:
+                # from_rel is 'pathinroot/foo/bar'
+                from_rel_utf8 = from_rel.encode('utf8')
+                from_dirname, from_tail = osutils.split(from_rel)
+                from_dirname, from_tail_utf8 = osutils.split(from_rel_utf8)
+                from_entry = self._get_entry(path=from_rel)
+                if from_entry == (None, None):
+                    raise errors.BzrMoveFailedError(
+                        errors.NotVersionedError(path=from_rel))
+                from_id = from_entry[0][2]
+                to_rel = pathjoin(to_dir, from_tail)
+                to_rel_utf8 = pathjoin(to_dir_utf8, from_tail_utf8)
+                item_to_entry = self._get_entry(path=to_rel)
+                if item_to_entry != (None, None):
+                    raise errors.BzrMoveFailedError(
+                        from_rel, to_rel, "Target is already versioned.")
+                if from_rel == to_rel:
+                    raise errors.BzrMoveFailedError(
+                        from_rel, to_rel, "Source and target are identical.")
+                from_missing = not self.has_filename(from_rel)
+                to_missing = not self.has_filename(to_rel)
                 move_file = False
-                    raise errors.RenameFailedFilesExist(from_rel, to_rel)
+                        raise errors.BzrMoveFailedError(
+                            extra="New file has not been created yet"))
+                        # neither path exists
+                        raise errors.BzrRenameFailedError(
+                            errors.PathsDoNotExist(paths=(from_rel, to_rel)))
+                    if from_missing: # implicitly just update our path mapping
+                        raise errors.RenameFailedFilesExist(from_rel, to_rel)
-            def rollback_rename():
-                """A single rename has failed, roll it back."""
-                # roll back everything, even if we encounter trouble doing one
-                # TODO: at least log the other exceptions rather than just
-                # losing them mbp 20070307
-                for rollback in reversed(rollbacks):
-                        exc_info = sys.exc_info()
-                raise exc_info[0], exc_info[1], exc_info[2]
-            # perform the disk move first - its the most likely failure point.
-                from_rel_abs = self.abspath(from_rel)
-                to_rel_abs = self.abspath(to_rel)
-                    osutils.rename(from_rel_abs, to_rel_abs)
-                    raise errors.BzrMoveFailedError(from_rel, to_rel, e[1])
-                rollbacks.append(lambda: osutils.rename(to_rel_abs, from_rel_abs))
-                # perform the rename in the inventory next if needed: its easy
-                    from_entry = inv[from_id]
-                    current_parent = from_entry.parent_id
-                    inv.rename(from_id, to_dir_id, from_tail)
-                        lambda: inv.rename(from_id, current_parent, from_tail))
-                # finally do the rename in the dirstate, which is a little
-                # tricky to rollback, but least likely to need it.
-                old_block_index, old_entry_index, dir_present, file_present = \
-                    state._get_block_entry_index(from_dirname, from_tail_utf8, 0)
-                old_block = state._dirblocks[old_block_index][1]
-                old_entry = old_block[old_entry_index]
-                from_key, old_entry_details = old_entry
-                cur_details = old_entry_details[0]
-                to_key = ((to_block[0],) + from_key[1:3])
-                minikind = cur_details[0]
-                move_one(old_entry, from_path_utf8=from_rel_utf8,
-                         executable=cur_details[3],
-                         fingerprint=cur_details[1],
-                         packed_stat=cur_details[4],
-                         to_path_utf8=to_rel_utf8)
-                    def update_dirblock(from_dir, to_key, to_dir_utf8):
-                        """Recursively update all entries in this dirblock."""
-                            raise AssertionError("renaming root not supported")
-                        from_key = (from_dir, '')
-                        from_block_idx, present = \
-                            state._find_block_index_from_key(from_key)
-                            # This is the old record, if it isn't present, then
-                            # there is theoretically nothing to update.
-                            # (Unless it isn't present because of lazy loading,
-                            # but we don't do that yet)
-                        from_block = state._dirblocks[from_block_idx]
-                        to_block_index, to_entry_index, _, _ = \
-                            state._get_block_entry_index(to_key[0], to_key[1], 0)
-                        to_block_index = state._ensure_block(
-                            to_block_index, to_entry_index, to_dir_utf8)
-                        to_block = state._dirblocks[to_block_index]
-                        # Grab a copy since move_one may update the list.
-                        for entry in from_block[1][:]:
-                            if not (entry[0][0] == from_dir):
-                                raise AssertionError()
-                            cur_details = entry[1][0]
-                            to_key = (to_dir_utf8, entry[0][1], entry[0][2])
-                            from_path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
-                            to_path_utf8 = osutils.pathjoin(to_dir_utf8, entry[0][1])
-                            minikind = cur_details[0]
-                                # Deleted children of a renamed directory
-                                # Do not need to be updated.
-                                # Children that have been renamed out of this
-                                # directory should also not be updated
-                            move_one(entry, from_path_utf8=from_path_utf8,
-                                     executable=cur_details[3],
-                                     fingerprint=cur_details[1],
-                                     packed_stat=cur_details[4],
-                                     to_path_utf8=to_path_utf8)
-                                # We need to move all the children of this
-                                update_dirblock(from_path_utf8, to_key,
-                    update_dirblock(from_rel_utf8, to_key, to_rel_utf8)
-            result.append((from_rel, to_rel))
-        state._dirblock_state = dirstate.DirState.IN_MEMORY_MODIFIED
-        self._make_dirty(reset_inventory=False)
+                # perform the disk move first - its the most likely failure point.
+                    from_rel_abs = self.abspath(from_rel)
+                    to_rel_abs = self.abspath(to_rel)
+                        osutils.rename(from_rel_abs, to_rel_abs)
+                        raise errors.BzrMoveFailedError(from_rel, to_rel, e[1])
+                        osutils.rename, to_rel_abs, from_rel_abs)
+                    # perform the rename in the inventory next if needed: its easy
+                        from_entry = inv.get_entry(from_id)
+                        current_parent = from_entry.parent_id
+                        inv.rename(from_id, to_dir_id, from_tail)
+                            inv.rename, from_id, current_parent, from_tail)
+                    # finally do the rename in the dirstate, which is a little
+                    # tricky to rollback, but least likely to need it.
+                    old_block_index, old_entry_index, dir_present, file_present = \
+                        state._get_block_entry_index(
+                            from_dirname, from_tail_utf8, 0)
+                    old_block = state._dirblocks[old_block_index][1]
+                    old_entry = old_block[old_entry_index]
+                    from_key, old_entry_details = old_entry
+                    cur_details = old_entry_details[0]
+                    to_key = ((to_block[0],) + from_key[1:3])
+                    minikind = cur_details[0]
+                    move_one(old_entry, from_path_utf8=from_rel_utf8,
+                             executable=cur_details[3],
+                             fingerprint=cur_details[1],
+                             packed_stat=cur_details[4],
+                             to_path_utf8=to_rel_utf8)
+                        def update_dirblock(from_dir, to_key, to_dir_utf8):
+                            """Recursively update all entries in this dirblock."""
+                                raise AssertionError(
+                                    "renaming root not supported")
+                            from_key = (from_dir, '')
+                            from_block_idx, present = \
+                                state._find_block_index_from_key(from_key)
+                                # This is the old record, if it isn't present,
+                                # then there is theoretically nothing to
+                                # update. (Unless it isn't present because of
+                                # lazy loading, but we don't do that yet)
+                            from_block = state._dirblocks[from_block_idx]
+                            to_block_index, to_entry_index, _, _ = \
+                                state._get_block_entry_index(
+                                    to_key[0], to_key[1], 0)
+                            to_block_index = state._ensure_block(
+                                to_block_index, to_entry_index, to_dir_utf8)
+                            to_block = state._dirblocks[to_block_index]
+                            # Grab a copy since move_one may update the list.
+                            for entry in from_block[1][:]:
+                                if not (entry[0][0] == from_dir):
+                                    raise AssertionError()
+                                cur_details = entry[1][0]
+                                    to_dir_utf8, entry[0][1], entry[0][2])
+                                from_path_utf8 = osutils.pathjoin(
+                                    entry[0][0], entry[0][1])
+                                to_path_utf8 = osutils.pathjoin(
+                                    to_dir_utf8, entry[0][1])
+                                minikind = cur_details[0]
+                                if minikind in (b'a', b'r'):
+                                    # Deleted children of a renamed directory
+                                    # Do not need to be updated. Children that
+                                    # have been renamed out of this directory
+                                    # should also not be updated
+                                move_one(entry, from_path_utf8=from_path_utf8,
+                                         executable=cur_details[3],
+                                         fingerprint=cur_details[1],
+                                         packed_stat=cur_details[4],
+                                         to_path_utf8=to_path_utf8)
+                                    # We need to move all the children of this
+                                    update_dirblock(from_path_utf8, to_key,
+                        update_dirblock(from_rel_utf8, to_key, to_rel_utf8)
+                except BaseException:
+                result.append((from_rel, to_rel))
+                state._mark_modified()
+                self._make_dirty(reset_inventory=False)
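The rewritten move() replaces the old list of rollback lambdas with cleanup.ExitStack, which in breezy behaves like the standard library's contextlib.ExitStack. A minimal standalone sketch of that rollback pattern, using contextlib directly and hypothetical do/undo callables rather than anything from this diff:

    from contextlib import ExitStack

    def run_with_rollback(steps):
        """Run (do, undo) pairs; undo completed steps if a later one fails."""
        with ExitStack() as rollbacks:
            for do, undo in steps:
                do()
                # Registered callbacks run in reverse order on error exit.
                rollbacks.callback(undo)
            # Success: detach the callbacks so nothing is rolled back.
            rollbacks.pop_all()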
     def _must_be_locked(self):
         if not self._control_files._lock_count:
@@ -1071 +1102 @@
         If tree is None, then that element is treated as an unreachable
         parent tree - i.e. a ghost.
         """
-        dirstate = self.current_dirstate()
-        if len(parents_list) > 0:
-            if not allow_leftmost_as_ghost and parents_list[0][1] is None:
-                raise errors.GhostRevisionUnusableHere(parents_list[0][0])
-        parent_ids = [rev_id for rev_id, tree in parents_list]
-        graph = self.branch.repository.get_graph()
-        heads = graph.heads(parent_ids)
-        accepted_revisions = set()
-        # convert absent trees to the null tree, which we convert back to
-        # missing on access.
-        for rev_id, tree in parents_list:
-            if len(accepted_revisions) > 0:
-                # we always accept the first tree
-                if rev_id in accepted_revisions or rev_id not in heads:
-                    # We have already included either this tree, or its
-                    # descendent, so we skip it.
-            _mod_revision.check_not_reserved_id(rev_id)
-            if tree is not None:
-                real_trees.append((rev_id, tree))
-                real_trees.append((rev_id,
-                    self.branch.repository.revision_tree(
-                        _mod_revision.NULL_REVISION)))
-                ghosts.append(rev_id)
-            accepted_revisions.add(rev_id)
-        dirstate.set_parent_trees(real_trees, ghosts=ghosts)
-        self._make_dirty(reset_inventory=False)
+        with self.lock_tree_write():
+            dirstate = self.current_dirstate()
+            if len(parents_list) > 0:
+                if not allow_leftmost_as_ghost and parents_list[0][1] is None:
+                    raise errors.GhostRevisionUnusableHere(parents_list[0][0])
+            parent_ids = [rev_id for rev_id, tree in parents_list]
+            graph = self.branch.repository.get_graph()
+            heads = graph.heads(parent_ids)
+            accepted_revisions = set()
+            # convert absent trees to the null tree, which we convert back to
+            # missing on access.
+            for rev_id, tree in parents_list:
+                if len(accepted_revisions) > 0:
+                    # we always accept the first tree
+                    if rev_id in accepted_revisions or rev_id not in heads:
+                        # We have already included either this tree, or its
+                        # descendent, so we skip it.
+                _mod_revision.check_not_reserved_id(rev_id)
+                if tree is not None:
+                    real_trees.append((rev_id, tree))
+                    real_trees.append((rev_id,
+                                       self.branch.repository.revision_tree(
+                                           _mod_revision.NULL_REVISION)))
+                    ghosts.append(rev_id)
+                accepted_revisions.add(rev_id)
+            if (len(real_trees) == 1
+                    and self.branch.repository._format.fast_deltas
+                    and isinstance(real_trees[0][1], InventoryRevisionTree)
+                    and self.get_parent_ids()):
+                rev_id, rev_tree = real_trees[0]
+                basis_id = self.get_parent_ids()[0]
+                # There are times when basis_tree won't be in
+                # self.branch.repository, (switch, for example)
+                    basis_tree = self.branch.repository.revision_tree(basis_id)
+                except errors.NoSuchRevision:
+                    # Fall back to the set_parent_trees(), since we can't use
+                    # _make_delta if we can't get the RevisionTree
+                    delta = rev_tree.root_inventory._make_delta(
+                        basis_tree.root_inventory)
+                    dirstate.update_basis_by_delta(delta, rev_id)
+                dirstate.set_parent_trees(real_trees, ghosts=ghosts)
+            self._make_dirty(reset_inventory=False)
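The parent-filtering logic above (accept the first tree unconditionally, then drop any later parent that is already accepted or is not a graph head) can be illustrated in isolation. This is a toy sketch: `heads` stands in for Graph.heads(), and the revision ids are whatever the caller passes, nothing here comes from the diff itself:

    def filter_parents(parent_ids, heads):
        """Keep the first parent plus any later parent that is still a head."""
        accepted = []
        head_set = heads(parent_ids)
        for rev_id in parent_ids:
            if accepted and (rev_id in accepted or rev_id not in head_set):
                # This revision, or a descendant of it, is already included.
                continue
            accepted.append(rev_id)
        return accepted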
     def _set_root_id(self, file_id):
         """See WorkingTree.set_root_id."""
         state = self.current_dirstate()
-        state.set_path_id('', file_id)
+        state.set_path_id(b'', file_id)
         if state._dirblock_state == dirstate.DirState.IN_MEMORY_MODIFIED:
             self._make_dirty(reset_inventory=True)
@@ -1153 +1206 @@
             self.branch.unlock()

-    @needs_tree_write_lock
-    def unversion(self, file_ids):
-        """Remove the file ids in file_ids from the current versioned set.
+    def unversion(self, paths):
+        """Remove the file ids in paths from the current versioned set.

-        When a file_id is unversioned, all of its children are automatically
+        When a directory is unversioned, all of its children are automatically

-        :param file_ids: The file ids to stop versioning.
+        :param paths: The file ids to stop versioning.
         :raises: NoSuchId if any fileid is not currently versioned.
         """
-        state = self.current_dirstate()
-        state._read_dirblocks_if_needed()
-        ids_to_unversion = set(file_ids)
-        paths_to_unversion = set()
-        # check if the root is to be unversioned, if so, assert for now.
-        # walk the state marking unversioned things as absent.
-        # if there are any un-unversioned ids at the end, raise
-        for key, details in state._dirblocks[0][1]:
-            if (details[0][0] not in ('a', 'r') and # absent or relocated
-                key[2] in ids_to_unversion):
-                # I haven't written the code to unversion / yet - it should be
-                raise errors.BzrError('Unversioning the / is not currently supported')
-        while block_index < len(state._dirblocks):
-            # process one directory at a time.
-            block = state._dirblocks[block_index]
-            # first check: is the path one to remove - it or its children
-            delete_block = False
-            for path in paths_to_unversion:
-                if (block[0].startswith(path) and
-                    (len(block[0]) == len(path) or
-                     block[0][len(path)] == '/')):
-                    # this entire block should be deleted - its the block for a
-                    # path to unversion; or the child of one
-            # TODO: trim paths_to_unversion as we pass by paths
-                # this block is to be deleted: process it.
-                # TODO: we can special case the no-parents case and
-                # just forget the whole block.
+        with self.lock_tree_write():
+            state = self.current_dirstate()
+            state._read_dirblocks_if_needed()
+                file_id = self.path2id(path)
+                    raise errors.NoSuchFile(self, path)
+                file_ids.add(file_id)
+            ids_to_unversion = set(file_ids)
+            paths_to_unversion = set()
+            # check if the root is to be unversioned, if so, assert for now.
+            # walk the state marking unversioned things as absent.
+            # if there are any un-unversioned ids at the end, raise
+            for key, details in state._dirblocks[0][1]:
+                if (details[0][0] not in (b'a', b'r') and # absent or relocated
+                        key[2] in ids_to_unversion):
+                    # I haven't written the code to unversion / yet - it should
+                    raise errors.BzrError(
+                        'Unversioning the / is not currently supported')
+            while block_index < len(state._dirblocks):
+                # process one directory at a time.
+                block = state._dirblocks[block_index]
+                # first check: is the path one to remove - it or its children
+                delete_block = False
+                for path in paths_to_unversion:
+                    if (block[0].startswith(path) and
+                            (len(block[0]) == len(path) or
+                             block[0][len(path)] == '/')):
+                        # this entire block should be deleted - its the block for a
+                        # path to unversion; or the child of one
+                # TODO: trim paths_to_unversion as we pass by paths
+                    # this block is to be deleted: process it.
+                    # TODO: we can special case the no-parents case and
+                    # just forget the whole block.
+                    while entry_index < len(block[1]):
+                        entry = block[1][entry_index]
+                        if entry[1][0][0] in (b'a', b'r'):
+                            # don't remove absent or renamed entries
+                        # Mark this file id as having been removed
+                        ids_to_unversion.discard(entry[0][2])
+                        if not state._make_absent(entry):
+                            # The block has not shrunk.
+                # go to the next block. (At the moment we dont delete empty
                 entry_index = 0
                 while entry_index < len(block[1]):
                     entry = block[1][entry_index]
-                if entry[1][0][0] in 'ar':
-                    # don't remove absent or renamed entries
-                # Mark this file id as having been removed
-                ids_to_unversion.discard(entry[0][2])
-                if not state._make_absent(entry):
-                    # The block has not shrunk.
-            # go to the next block. (At the moment we dont delete empty
+                    if (entry[1][0][0] in (b'a', b'r') or # absent, relocated
+                        # ^ some parent row.
+                            entry[0][2] not in ids_to_unversion):
+                        # ^ not an id to unversion
+                    if entry[1][0][0] == b'd':
+                        paths_to_unversion.add(
+                            pathjoin(entry[0][0], entry[0][1]))
+                    if not state._make_absent(entry):
+                    # we have unversioned this id
+                    ids_to_unversion.remove(entry[0][2])
                 block_index += 1
-            while entry_index < len(block[1]):
-                entry = block[1][entry_index]
-                if (entry[1][0][0] in ('a', 'r') or # absent, relocated
-                    # ^ some parent row.
-                    entry[0][2] not in ids_to_unversion):
-                    # ^ not an id to unversion
-                if entry[1][0][0] == 'd':
-                    paths_to_unversion.add(pathjoin(entry[0][0], entry[0][1]))
-                if not state._make_absent(entry):
-                # we have unversioned this id
-                ids_to_unversion.remove(entry[0][2])
-        if ids_to_unversion:
-            raise errors.NoSuchId(self, iter(ids_to_unversion).next())
-        self._make_dirty(reset_inventory=False)
-        # have to change the legacy inventory too.
-        if self._inventory is not None:
-            for file_id in file_ids:
-                self._inventory.remove_recursive_id(file_id)
+            if ids_to_unversion:
+                raise errors.NoSuchId(self, next(iter(ids_to_unversion)))
+            self._make_dirty(reset_inventory=False)
+            # have to change the legacy inventory too.
+            if self._inventory is not None:
+                for file_id in file_ids:
+                    if self._inventory.has_id(file_id):
+                        self._inventory.remove_recursive_id(file_id)
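With unversion() now taking relative paths instead of file ids, a caller-side sketch looks roughly like the following. The tree location and path list are assumptions for illustration; WorkingTree.open is the usual entry point and lock_tree_write() is used as a context manager, as elsewhere in this diff:

    from breezy.workingtree import WorkingTree

    def unversion_paths(tree_dir, paths):
        tree = WorkingTree.open(tree_dir)
        with tree.lock_tree_write():
            # unversion() resolves the file ids itself and raises if a path
            # is not versioned.
            tree.unversion(paths)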
-    @needs_tree_write_lock
     def rename_one(self, from_rel, to_rel, after=False):
         """See WorkingTree.rename_one"""
-        WorkingTree.rename_one(self, from_rel, to_rel, after)
+        with self.lock_tree_write():
+            super(DirStateWorkingTree, self).rename_one(
+                from_rel, to_rel, after)

-    @needs_tree_write_lock
     def apply_inventory_delta(self, changes):
         """See MutableTree.apply_inventory_delta"""
-        state = self.current_dirstate()
-        state.update_by_delta(changes)
-        self._make_dirty(reset_inventory=True)
+        with self.lock_tree_write():
+            state = self.current_dirstate()
+            state.update_by_delta(changes)
+            self._make_dirty(reset_inventory=True)

     def update_basis_by_delta(self, new_revid, delta):
         """See MutableTree.update_basis_by_delta."""
@@ -1723 +1844 @@
                 inv_entry.text_size = size
                 inv_entry.text_sha1 = fingerprint
             elif kind == 'directory':
-                parent_ies[(dirname + '/' + name).strip('/')] = inv_entry
+                parent_ies[(dirname + b'/' + name).strip(b'/')] = inv_entry
             elif kind == 'symlink':
-                inv_entry.executable = False
-                inv_entry.text_size = None
                 inv_entry.symlink_target = utf8_decode(fingerprint)[0]
             elif kind == 'tree-reference':
                 inv_entry.reference_revision = fingerprint or None
-                raise AssertionError("cannot convert entry %r into an InventoryEntry"
+                raise AssertionError(
+                    "cannot convert entry %r into an InventoryEntry"
             # These checks cost us around 40ms on a 55k entry tree
             if file_id in inv_byid:
-                raise AssertionError('file_id %s already in'
+                raise AssertionError(
+                    'file_id %s already in'
                     ' inventory as %s' % (file_id, inv_byid[file_id]))
             if name_unicode in parent_ie.children:
                 raise AssertionError('name %r already in parent'
             inv_byid[file_id] = inv_entry
             parent_ie.children[name_unicode] = inv_entry
         self._inventory = inv
-    def get_file_mtime(self, file_id, path=None):
+    def get_file_mtime(self, path):
         """Return the modification time for this record.

         We return the timestamp of the last-changed revision.
         """
         # Make sure the file exists
-        entry = self._get_entry(file_id, path=path)
+        entry = self._get_entry(path=path)
         if entry == (None, None): # do we raise?
+            raise errors.NoSuchFile(path)
         parent_index = self._get_parent_index()
         last_changed_revision = entry[1][parent_index][4]
             rev = self._repository.get_revision(last_changed_revision)
         except errors.NoSuchRevision:
-            raise errors.FileTimestampUnavailable(self.id2path(file_id))
+            raise FileTimestampUnavailable(path)
         return rev.timestamp

-    def get_file_sha1(self, file_id, path=None, stat_value=None):
-        entry = self._get_entry(file_id=file_id, path=path)
+    def get_file_sha1(self, path, stat_value=None):
+        entry = self._get_entry(path=path)
         parent_index = self._get_parent_index()
         parent_details = entry[1][parent_index]
-        if parent_details[0] == 'f':
+        if parent_details[0] == b'f':
             return parent_details[1]
-    def get_file(self, file_id, path=None):
-        return StringIO(self.get_file_text(file_id))
-
-    def get_file_size(self, file_id):
+    def get_file_revision(self, path):
+        with self.lock_read():
+            inv, inv_file_id = self._path2inv_file_id(path)
+            return inv.get_entry(inv_file_id).revision
+
+    def get_file(self, path):
+        return BytesIO(self.get_file_text(path))
+
+    def get_file_size(self, path):
         """See Tree.get_file_size"""
-        return self.inventory[file_id].text_size
-
-    def get_file_text(self, file_id, path=None):
-        _, content = list(self.iter_files_bytes([(file_id, None)]))[0]
-        return ''.join(content)
-
-    def get_reference_revision(self, file_id, path=None):
-        return self.inventory[file_id].reference_revision
+        inv, inv_file_id = self._path2inv_file_id(path)
+        return inv.get_entry(inv_file_id).text_size
+
+    def get_file_text(self, path):
+        for _, content_iter in self.iter_files_bytes([(path, None)]):
+            if content is not None:
+                raise AssertionError('iter_files_bytes returned'
+                                     ' too many entries')
+            # For each entry returned by iter_files_bytes, we must consume the
+            # content_iter before we step the files iterator.
+            content = b''.join(content_iter)
+            raise AssertionError('iter_files_bytes did not return'
+                                 ' the requested data')
+
+    def get_reference_revision(self, path):
+        inv, inv_file_id = self._path2inv_file_id(path)
+        return inv.get_entry(inv_file_id).reference_revision
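The new get_file_text() documents a constraint on iter_files_bytes(): each returned chunk iterator must be fully consumed before stepping to the next entry. A small sketch of a conforming caller; the tree object and path list are assumptions, only the (path, identifier) pairs and (identifier, chunk iterator) results follow the Tree.iter_files_bytes contract:

    def read_several(tree, paths):
        """Collect file texts, consuming each chunk iterator before moving on."""
        texts = {}
        with tree.lock_read():
            desired = [(path, path) for path in paths]  # (path, identifier)
            for identifier, chunks in tree.iter_files_bytes(desired):
                # Join the chunks now; the iterator may become invalid once
                # the outer loop advances to the next file.
                texts[identifier] = b''.join(chunks)
        return texts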

     def iter_files_bytes(self, desired_files):
         """See Tree.iter_files_bytes.