        state = self.current_dirstate()
        if stat_value is None:
            try:
                stat_value = osutils.lstat(file_abspath)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    return None
                else:
                    raise
        link_or_sha1 = dirstate.update_entry(state, entry, file_abspath,
                                             stat_value=stat_value)
        if entry[1][0][0] == b'f':
            if link_or_sha1 is None:
                file_obj, statvalue = self.get_file_with_stat(path)
                try:
                    sha1 = osutils.sha_file(file_obj)
                finally:
                    file_obj.close()
                self._observed_sha1(path, (sha1, statvalue))
                return sha1
            else:
                return link_or_sha1
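
    # Note on the dirstate "minikind" codes that appear throughout this
    # module (see breezy.bzr.dirstate): b'f' file, b'd' directory,
    # b'l' symlink, b't' tree-reference, b'a' absent, and b'r' relocated
    # (the entry lives in another directory block).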

    def _get_root_inventory(self):
        """Get the inventory for the tree. This is only valid within a lock."""
        if 'evil' in debug.debug_flags:
            trace.mutter_callsite(
                2, "accessing .inventory forces a size of tree translation.")
        if self._inventory is not None:
            return self._inventory
        self._must_be_locked()
        self._generate_inventory()
        return self._inventory

    root_inventory = property(_get_root_inventory,
                              doc="Root inventory of this tree")

    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation requests the ids list from the dirstate file.
        """
        with self.lock_read():
            return self.current_dirstate().get_parent_ids()
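
    # Illustrative usage (a sketch, not part of this module): get_parent_ids
    # takes its own read lock, so a caller only needs an explicit lock when
    # batching several read operations together.
    #
    #   tree = WorkingTree.open('.')
    #   with tree.lock_read():
    #       parents = tree.get_parent_ids()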

    def get_reference_revision(self, path):
        # referenced tree's revision is whatever's currently there
        return self.get_nested_tree(path).last_revision()

    def get_nested_tree(self, path):
        return WorkingTree.open(self.abspath(path))

    def get_root_id(self):
        """Return the id of this tree's root"""
        return self._get_entry(path='')[0][2]

    def has_id(self, file_id):
        state = self.current_dirstate()
        row, parents = self._get_entry(file_id=file_id)
        if row is None:
            return False
        return osutils.lexists(pathjoin(
            self.basedir, row[0].decode('utf8'), row[1].decode('utf8')))

    def has_or_had_id(self, file_id):
        state = self.current_dirstate()
        row, parents = self._get_entry(file_id=file_id)
        return row is not None

    def id2path(self, file_id, recurse='down'):
        "Convert a file-id to a path."
        with self.lock_read():
            state = self.current_dirstate()
            entry = self._get_entry(file_id=file_id)
            if entry == (None, None):
                if recurse == 'down':
                    if 'evil' in debug.debug_flags:
                        trace.mutter_callsite(
                            2, "Tree.id2path scans all nested trees.")
                    for nested_path in self.iter_references():
                        nested_tree = self.get_nested_tree(nested_path)
                        try:
                            return osutils.pathjoin(
                                nested_path, nested_tree.id2path(file_id))
                        except errors.NoSuchId:
                            pass
                raise errors.NoSuchId(tree=self, file_id=file_id)
            path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
            return path_utf8.decode('utf8')
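
    # A dirstate "entry", as returned by _get_entry(), is a 2-tuple of
    # (key, details): key is (dirname_utf8, basename_utf8, file_id) and
    # details holds one tuple per tree (working tree first, then each
    # parent) of the form (minikind, fingerprint, size, executable,
    # packed_stat_or_revision).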

    def _is_executable_from_path_and_stat_from_basis(self, path, stat_result):
        entry = self._get_entry(path=path)
        if entry == (None, None):
            return False  # Missing entries are not executable
        return entry[1][0][3]  # Executable?

    def is_executable(self, path):
        """Test if a file is executable or not.

        Note: The caller is expected to take a read-lock before calling this.
        """
        if not self._supports_executable():
            entry = self._get_entry(path=path)
            if entry == (None, None):
                return False
            return entry[1][0][3]
        else:
            self._must_be_locked()
            mode = osutils.lstat(self.abspath(path)).st_mode
            return bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)

    def all_file_ids(self):
        self._must_be_locked()
        result = set()
        for key, tree_details in self.current_dirstate()._iter_entries():
            if tree_details[0][0] in (b'a', b'r'):  # absent, relocated
                continue
            result.add(key[2])
        return result

    def all_versioned_paths(self):
        self._must_be_locked()
        return {path for path, entry in
                self.root_inventory.iter_entries(recursive=True)}

    def __iter__(self):
        """Iterate through file_ids for this tree.

        file_ids are in a WorkingTree if they are in the working inventory
        and the working file exists.
        """
        with self.lock_read():
            result = []
            for key, tree_details in self.current_dirstate()._iter_entries():
                if tree_details[0][0] in (b'a', b'r'):  # absent, relocated
                    # not relevant to the working tree
                    continue
                path = pathjoin(self.basedir, key[0].decode(
                    'utf8'), key[1].decode('utf8'))
                if osutils.lexists(path):
                    result.append(key[2])
            return iter(result)

    def iter_references(self):
        if not self._repo_supports_tree_reference:
            # When the repo doesn't support references, we will have nothing
            # to return
            return
        with self.lock_read():
            for key, tree_details in self.current_dirstate()._iter_entries():
                if tree_details[0][0] in (b'a', b'r'):  # absent, relocated
                    # not relevant to the working tree
                    continue
                if not key[1]:
                    # the root is not a reference.
                    continue
                relpath = pathjoin(key[0].decode('utf8'), key[1].decode('utf8'))
                try:
                    if self.kind(relpath) == 'tree-reference':
                        yield relpath, key[2]
                except errors.NoSuchFile:
                    # path is missing on disk.
                    continue
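
    # Tree references are nested trees recorded as b't' entries in the
    # dirstate; iter_references() yields (relative_path, file_id) pairs for
    # them and silently skips references whose directory is missing on disk.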

    def _observed_sha1(self, path, sha_and_stat):
        """See MutableTree._observed_sha1."""
        state = self.current_dirstate()
        entry = self._get_entry(path=path)
        state._observed_sha1(entry, *sha_and_stat)

    def kind(self, relpath):
        """Return the kind of a file.

        This is always the actual kind that's on disk, regardless of what it
        was added as.
        """
        abspath = self.abspath(relpath)
        kind = file_kind(abspath)
        if (self._repo_supports_tree_reference and kind == 'directory'):
            with self.lock_read():
                entry = self._get_entry(path=relpath)
                if entry[1] is not None:
                    if entry[1][0][0] == b't':
                        kind = 'tree-reference'
        return kind

    def _last_revision(self):
        """See Mutable.last_revision."""
        with self.lock_read():
            parent_ids = self.current_dirstate().get_parent_ids()
            if parent_ids:
                return parent_ids[0]
            else:
                return _mod_revision.NULL_REVISION

    def lock_read(self):
        """See Branch.lock_read, and WorkingTree.unlock.

        :return: A breezy.lock.LogicalLockResult.
        """
        self.branch.lock_read()
        try:
            self._control_files.lock_read()
            try:
                self._repo_supports_tree_reference = getattr(
                    self.branch.repository._format, "supports_tree_reference",
                    False)
            except BaseException:
                self._control_files.unlock()
                raise
        except BaseException:
            self.branch.unlock()
            raise
        return LogicalLockResult(self.unlock)

    def lock_tree_write(self):
        """See MutableTree.lock_tree_write, and WorkingTree.unlock.

        :return: A breezy.lock.LogicalLockResult.
        """
        self.branch.lock_read()
        return self._lock_self_write()

    def lock_write(self):
        """See MutableTree.lock_write, and WorkingTree.unlock.

        :return: A breezy.lock.LogicalLockResult.
        """
        self.branch.lock_write()
        return self._lock_self_write()
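
    # All three lock methods return a LogicalLockResult wrapping the
    # matching unlock, so a caller can pair them explicitly (illustrative
    # sketch, not part of this module):
    #
    #   lock = tree.lock_write()
    #   try:
    #       ...  # mutate the tree
    #   finally:
    #       lock.unlock()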

    def move(self, from_paths, to_dir, after=False):
        """See WorkingTree.move()."""
        result = []
        if not from_paths:
            return result
        with self.lock_tree_write():
            state = self.current_dirstate()
            if isinstance(from_paths, (str, bytes)):
                raise ValueError()
            to_dir_utf8 = to_dir.encode('utf8')
            to_entry_dirname, to_basename = os.path.split(to_dir_utf8)
            # check destination directory
            # get the details for it
            (to_entry_block_index, to_entry_entry_index, dir_present,
             entry_present) = state._get_block_entry_index(
                to_entry_dirname, to_basename, 0)
            if not entry_present:
                raise errors.BzrMoveFailedError(
                    '', to_dir, errors.NotVersionedError(to_dir))
            to_entry = state._dirblocks[to_entry_block_index][1][to_entry_entry_index]
            # get a handle on the block itself.
            to_block_index = state._ensure_block(
                to_entry_block_index, to_entry_entry_index, to_dir_utf8)
            to_block = state._dirblocks[to_block_index]
            to_abs = self.abspath(to_dir)
            if not isdir(to_abs):
                raise errors.BzrMoveFailedError('', to_dir,
                                                errors.NotADirectory(to_abs))

            if to_entry[1][0][0] != b'd':
                raise errors.BzrMoveFailedError('', to_dir,
                                                errors.NotADirectory(to_abs))

            if self._inventory is not None:
                update_inventory = True
                inv = self.root_inventory
                to_dir_id = to_entry[0][2]
            else:
                update_inventory = False

            # GZ 2017-03-28: The rollbacks variable was shadowed in the loop below
            # missing those added here, but there's also no test coverage for this.
            rollbacks = cleanup.ExitStack()

            def move_one(old_entry, from_path_utf8, minikind, executable,
                         fingerprint, packed_stat, size,
                         to_block, to_key, to_path_utf8):
                state._make_absent(old_entry)
                from_key = old_entry[0]
                rollbacks.callback(
                    state.update_minimal,
                    from_key,
                    minikind,
                    executable=executable,
                    fingerprint=fingerprint,
                    packed_stat=packed_stat,
                    size=size,
                    path_utf8=from_path_utf8)
                state.update_minimal(to_key,
                                     minikind,
                                     executable=executable,
                                     fingerprint=fingerprint,
                                     packed_stat=packed_stat,
                                     size=size,
                                     path_utf8=to_path_utf8)
                added_entry_index, _ = state._find_entry_index(
                    to_key, to_block[1])
                new_entry = to_block[1][added_entry_index]
                rollbacks.callback(state._make_absent, new_entry)

            for from_rel in from_paths:
                # from_rel is 'pathinroot/foo/bar'
                from_rel_utf8 = from_rel.encode('utf8')
                from_dirname, from_tail = osutils.split(from_rel)
                from_dirname, from_tail_utf8 = osutils.split(from_rel_utf8)
                from_entry = self._get_entry(path=from_rel)
                if from_entry == (None, None):
                    raise errors.BzrMoveFailedError(
                        from_rel, to_dir,
                        errors.NotVersionedError(path=from_rel))

                from_id = from_entry[0][2]
                to_rel = pathjoin(to_dir, from_tail)
                to_rel_utf8 = pathjoin(to_dir_utf8, from_tail_utf8)
                item_to_entry = self._get_entry(path=to_rel)
                if item_to_entry != (None, None):
                    raise errors.BzrMoveFailedError(
                        from_rel, to_rel, "Target is already versioned.")

                if from_rel == to_rel:
                    raise errors.BzrMoveFailedError(
                        from_rel, to_rel, "Source and target are identical.")

                from_missing = not self.has_filename(from_rel)
                to_missing = not self.has_filename(to_rel)
                if after:
                    move_file = False
                else:
                    move_file = True
                if to_missing:
                    if not move_file:
                        raise errors.BzrMoveFailedError(
                            from_rel, to_rel,
                            errors.NoSuchFile(
                                path=to_rel,
                                extra="New file has not been created yet"))
                    elif from_missing:
                        # neither path exists
                        raise errors.BzrRenameFailedError(
                            from_rel, to_rel,
                            errors.PathsDoNotExist(paths=(from_rel, to_rel)))
                else:  # to exists
                    if from_missing:  # implicitly just update our path mapping
                        move_file = False
                    elif not after:
                        raise errors.RenameFailedFilesExist(from_rel, to_rel)
                # perform the disk move first - it's the most likely failure point.
                if move_file:
                    from_rel_abs = self.abspath(from_rel)
                    to_rel_abs = self.abspath(to_rel)
                    try:
                        osutils.rename(from_rel_abs, to_rel_abs)
                    except OSError as e:
                        raise errors.BzrMoveFailedError(from_rel, to_rel, e[1])
                    rollbacks.callback(
                        osutils.rename, to_rel_abs, from_rel_abs)
                try:
                    # perform the rename in the inventory next if needed: it's easy
                    # to rollback
                    if update_inventory:
                        # rename the entry
                        from_entry = inv.get_entry(from_id)
                        current_parent = from_entry.parent_id
                        inv.rename(from_id, to_dir_id, from_tail)
                        rollbacks.callback(
                            inv.rename, from_id, current_parent, from_tail)
                    # finally do the rename in the dirstate, which is a little
                    # tricky to rollback, but least likely to need it.
                    old_block_index, old_entry_index, dir_present, file_present = \
                        state._get_block_entry_index(
                            from_dirname, from_tail_utf8, 0)
                    old_block = state._dirblocks[old_block_index][1]
                    old_entry = old_block[old_entry_index]
                    from_key, old_entry_details = old_entry
                    cur_details = old_entry_details[0]
                    # remove the old row
                    to_key = ((to_block[0],) + from_key[1:3])
                    minikind = cur_details[0]
                    move_one(old_entry, from_path_utf8=from_rel_utf8,
                             minikind=minikind,
                             executable=cur_details[3],
                             fingerprint=cur_details[1],
                             packed_stat=cur_details[4],
                             size=cur_details[2],
                             to_block=to_block,
                             to_key=to_key,
                             to_path_utf8=to_rel_utf8)

                    if minikind == b'd':
                        def update_dirblock(from_dir, to_key, to_dir_utf8):
                            """Recursively update all entries in this dirblock."""
                            if from_dir == b'':
                                raise AssertionError(
                                    "renaming root not supported")
                            from_key = (from_dir, '')
                            from_block_idx, present = \
                                state._find_block_index_from_key(from_key)
                            if not present:
                                # This is the old record, if it isn't present,
                                # then there is theoretically nothing to
                                # update. (Unless it isn't present because of
                                # lazy loading, but we don't do that yet)
                                return
                            from_block = state._dirblocks[from_block_idx]
                            to_block_index, to_entry_index, _, _ = \
                                state._get_block_entry_index(
                                    to_key[0], to_key[1], 0)
                            to_block_index = state._ensure_block(
                                to_block_index, to_entry_index, to_dir_utf8)
                            to_block = state._dirblocks[to_block_index]

                            # Grab a copy since move_one may update the list.
                            for entry in from_block[1][:]:
                                if not (entry[0][0] == from_dir):
                                    raise AssertionError()
                                cur_details = entry[1][0]
                                to_key = (
                                    to_dir_utf8, entry[0][1], entry[0][2])
                                from_path_utf8 = osutils.pathjoin(
                                    entry[0][0], entry[0][1])
                                to_path_utf8 = osutils.pathjoin(
                                    to_dir_utf8, entry[0][1])
                                minikind = cur_details[0]
                                if minikind in (b'a', b'r'):
                                    # Deleted children of a renamed directory
                                    # Do not need to be updated. Children that
                                    # have been renamed out of this directory
                                    # should also not be updated
                                    continue
                                move_one(entry, from_path_utf8=from_path_utf8,
                                         minikind=minikind,
                                         executable=cur_details[3],
                                         fingerprint=cur_details[1],
                                         packed_stat=cur_details[4],
                                         size=cur_details[2],
                                         to_block=to_block,
                                         to_key=to_key,
                                         to_path_utf8=to_path_utf8)
                                if minikind == b'd':
                                    # We need to move all the children of this
                                    # entry
                                    update_dirblock(from_path_utf8, to_key,
                                                    to_path_utf8)
                        update_dirblock(from_rel_utf8, to_key, to_rel_utf8)
                except BaseException:
                    rollbacks.close()
                    raise
                result.append((from_rel, to_rel))
                state._mark_modified()
                self._make_dirty(reset_inventory=False)

            return result
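
    # Illustrative call (sketch): move two versioned files into an existing
    # versioned directory; the return value lists (from_path, to_path)
    # pairs:
    #
    #   tree.move(['a.txt', 'b.txt'], 'docs')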

    def _must_be_locked(self):
        if not self._control_files._lock_count:
            raise errors.ObjectNotLocked(self)

        If tree is None, then that element is treated as an unreachable
        parent tree - i.e. a ghost.
        """
        with self.lock_tree_write():
            dirstate = self.current_dirstate()
            if len(parents_list) > 0:
                if not allow_leftmost_as_ghost and parents_list[0][1] is None:
                    raise errors.GhostRevisionUnusableHere(parents_list[0][0])
            real_trees = []
            ghosts = []

            parent_ids = [rev_id for rev_id, tree in parents_list]
            graph = self.branch.repository.get_graph()
            heads = graph.heads(parent_ids)
            accepted_revisions = set()

            # convert absent trees to the null tree, which we convert back to
            # missing on access.
            for rev_id, tree in parents_list:
                if len(accepted_revisions) > 0:
                    # we always accept the first tree
                    if rev_id in accepted_revisions or rev_id not in heads:
                        # We have already included either this tree, or its
                        # descendent, so we skip it.
                        continue
                _mod_revision.check_not_reserved_id(rev_id)
                if tree is not None:
                    real_trees.append((rev_id, tree))
                else:
                    real_trees.append((rev_id,
                                       self.branch.repository.revision_tree(
                                           _mod_revision.NULL_REVISION)))
                    ghosts.append(rev_id)
                accepted_revisions.add(rev_id)

            updated = False
            if (len(real_trees) == 1
                    and not ghosts
                    and self.branch.repository._format.fast_deltas
                    and isinstance(real_trees[0][1], InventoryRevisionTree)
                    and self.get_parent_ids()):
                rev_id, rev_tree = real_trees[0]
                basis_id = self.get_parent_ids()[0]
                # There are times when basis_tree won't be in
                # self.branch.repository, (switch, for example)
                try:
                    basis_tree = self.branch.repository.revision_tree(basis_id)
                except errors.NoSuchRevision:
                    # Fall back to the set_parent_trees(), since we can't use
                    # _make_delta if we can't get the RevisionTree
                    pass
                else:
                    delta = rev_tree.root_inventory._make_delta(
                        basis_tree.root_inventory)
                    dirstate.update_basis_by_delta(delta, rev_id)
                    updated = True
            if not updated:
                dirstate.set_parent_trees(real_trees, ghosts=ghosts)
            self._make_dirty(reset_inventory=False)
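
    # The block above is the body of the tree's set_parent_trees(); an
    # illustrative call (sketch) that resets the tree to a single parent:
    #
    #   rev_id = tree.last_revision()
    #   rev_tree = tree.branch.repository.revision_tree(rev_id)
    #   tree.set_parent_trees([(rev_id, rev_tree)])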

    def _set_root_id(self, file_id):
        """See WorkingTree.set_root_id."""
        state = self.current_dirstate()
        state.set_path_id(b'', file_id)
        if state._dirblock_state == dirstate.DirState.IN_MEMORY_MODIFIED:
            self._make_dirty(reset_inventory=True)

    def unversion(self, paths):
        """Remove the file ids in paths from the current versioned set.

        When a directory is unversioned, all of its children are automatically
        unversioned.

        :param paths: The paths to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        with self.lock_tree_write():
            state = self.current_dirstate()
            state._read_dirblocks_if_needed()
            file_ids = set()
            for path in paths:
                file_id = self.path2id(path)
                if file_id is None:
                    raise errors.NoSuchFile(self, path)
                file_ids.add(file_id)
            ids_to_unversion = set(file_ids)
            paths_to_unversion = set()
            # check if the root is to be unversioned, if so, assert for now.
            # walk the state marking unversioned things as absent.
            # if there are any un-unversioned ids at the end, raise
            for key, details in state._dirblocks[0][1]:
                if (details[0][0] not in (b'a', b'r') and  # absent or relocated
                        key[2] in ids_to_unversion):
                    # I haven't written the code to unversion / yet - it should
                    # be supported.
                    raise errors.BzrError(
                        'Unversioning the / is not currently supported')
            block_index = 0
            while block_index < len(state._dirblocks):
                # process one directory at a time.
                block = state._dirblocks[block_index]
                # first check: is the path one to remove - it or its children
                delete_block = False
                for path in paths_to_unversion:
                    if (block[0].startswith(path) and
                        (len(block[0]) == len(path) or
                         block[0][len(path)] == '/')):
                        # this entire block should be deleted - it's the block for a
                        # path to unversion; or the child of one
                        delete_block = True
                        break
                # TODO: trim paths_to_unversion as we pass by paths
                if delete_block:
                    # this block is to be deleted: process it.
                    # TODO: we can special case the no-parents case and
                    # just forget the whole block.
                    entry_index = 0
                    while entry_index < len(block[1]):
                        entry = block[1][entry_index]
                        if entry[1][0][0] in (b'a', b'r'):
                            # don't remove absent or renamed entries
                            entry_index += 1
                        else:
                            # Mark this file id as having been removed
                            ids_to_unversion.discard(entry[0][2])
                            if not state._make_absent(entry):
                                # The block has not shrunk.
                                entry_index += 1
                    # go to the next block. (At the moment we don't delete empty
                    # dirblocks)
                    block_index += 1
                    continue
                entry_index = 0
                while entry_index < len(block[1]):
                    entry = block[1][entry_index]
                    if (entry[1][0][0] in (b'a', b'r') or  # absent, relocated
                        # ^ some parent row.
                            entry[0][2] not in ids_to_unversion):
                        # ^ not an id to unversion
                        entry_index += 1
                        continue
                    if entry[1][0][0] == b'd':
                        paths_to_unversion.add(
                            pathjoin(entry[0][0], entry[0][1]))
                    if not state._make_absent(entry):
                        entry_index += 1
                    # we have unversioned this id
                    ids_to_unversion.remove(entry[0][2])
                block_index += 1
            if ids_to_unversion:
                raise errors.NoSuchId(self, next(iter(ids_to_unversion)))
            self._make_dirty(reset_inventory=False)
            # have to change the legacy inventory too.
            if self._inventory is not None:
                for file_id in file_ids:
                    if self._inventory.has_id(file_id):
                        self._inventory.remove_recursive_id(file_id)
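
    # Illustrative call (sketch): stop versioning a file and a directory;
    # children of the directory are unversioned along with it:
    #
    #   tree.unversion(['obsolete.txt', 'old-dir'])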

    def rename_one(self, from_rel, to_rel, after=False):
        """See WorkingTree.rename_one"""
        with self.lock_tree_write():
            self.flush()
            super(DirStateWorkingTree, self).rename_one(
                from_rel, to_rel, after)

    def apply_inventory_delta(self, changes):
        """See MutableTree.apply_inventory_delta"""
        with self.lock_tree_write():
            state = self.current_dirstate()
            state.update_by_delta(changes)
            self._make_dirty(reset_inventory=True)

    def update_basis_by_delta(self, new_revid, delta):
        """See MutableTree.update_basis_by_delta."""

                    inv_entry.text_size = size
                    inv_entry.text_sha1 = fingerprint
                elif kind == 'directory':
                    parent_ies[(dirname + b'/' + name).strip(b'/')] = inv_entry
                elif kind == 'symlink':
                    inv_entry.symlink_target = utf8_decode(fingerprint)[0]
                elif kind == 'tree-reference':
                    inv_entry.reference_revision = fingerprint or None
                else:
                    raise AssertionError(
                        "cannot convert entry %r into an InventoryEntry"
                        % entry)
                # These checks cost us around 40ms on a 55k entry tree
                if file_id in inv_byid:
                    raise AssertionError(
                        'file_id %s already in'
                        ' inventory as %s' % (file_id, inv_byid[file_id]))
                if name_unicode in parent_ie.children:
                    raise AssertionError('name %r already in parent'
                                         % (name_unicode,))
                inv_byid[file_id] = inv_entry
                parent_ie.children[name_unicode] = inv_entry
        self._inventory = inv

    def get_file_mtime(self, path):
        """Return the modification time for this record.

        We return the timestamp of the last-changed revision.
        """
        # Make sure the file exists
        entry = self._get_entry(path=path)
        if entry == (None, None):  # do we raise?
            raise errors.NoSuchFile(path)
        parent_index = self._get_parent_index()
        last_changed_revision = entry[1][parent_index][4]
        try:
            rev = self._repository.get_revision(last_changed_revision)
        except errors.NoSuchRevision:
            raise FileTimestampUnavailable(path)
        return rev.timestamp

    def get_file_sha1(self, path, stat_value=None):
        entry = self._get_entry(path=path)
        parent_index = self._get_parent_index()
        parent_details = entry[1][parent_index]
        if parent_details[0] == b'f':
            return parent_details[1]
        return None

    def get_file_revision(self, path):
        with self.lock_read():
            inv, inv_file_id = self._path2inv_file_id(path)
            return inv.get_entry(inv_file_id).revision

    def get_file(self, path):
        return BytesIO(self.get_file_text(path))

    def get_file_size(self, path):
        """See Tree.get_file_size"""
        inv, inv_file_id = self._path2inv_file_id(path)
        return inv.get_entry(inv_file_id).text_size

    def get_file_text(self, path):
        content = None
        for _, content_iter in self.iter_files_bytes([(path, None)]):
            if content is not None:
                raise AssertionError('iter_files_bytes returned'
                                     ' too many entries')
            # For each entry returned by iter_files_bytes, we must consume the
            # content_iter before we step the files iterator.
            content = b''.join(content_iter)
        if content is None:
            raise AssertionError('iter_files_bytes did not return'
                                 ' the requested data')
        return content

    def get_reference_revision(self, path):
        inv, inv_file_id = self._path2inv_file_id(path)
        return inv.get_entry(inv_file_id).reference_revision

    def iter_files_bytes(self, desired_files):
        """See Tree.iter_files_bytes.

            return (kind, None, None, None)

    def is_executable(self, path):
        inv, inv_file_id = self._path2inv_file_id(path)
        if inv_file_id is None:
            raise errors.NoSuchFile(path)
        ie = inv.get_entry(inv_file_id)
        if ie.kind != "file":
            return False
        return ie.executable

    def is_locked(self):
        return self._locked

    def list_files(self, include_root=False, from_dir=None, recursive=True,
                   recurse_nested=False):
        # The only files returned by this are those from the version
        if from_dir is None:
            from_dir_id = None
            inv = self.root_inventory
        else:
            inv, from_dir_id = self._path2inv_file_id(from_dir)
            if from_dir_id is None:
                # Directory not versioned
                return iter([])

        def iter_entries(inv):
            entries = inv.iter_entries(from_dir=from_dir_id, recursive=recursive)
            if inv.root is not None and not include_root and from_dir is None:
                # skip the root for compatibility with the current apis.
                next(entries)
            for path, entry in entries:
                if entry.kind == 'tree-reference' and recurse_nested:
                    subtree = self._get_nested_tree(
                        path, entry.file_id, entry.reference_revision)
                    for subpath, status, kind, entry in subtree.list_files(
                            include_root=True, recursive=recursive,
                            recurse_nested=recurse_nested):
                        if subpath:
                            full_subpath = osutils.pathjoin(path, subpath)
                        else:
                            full_subpath = path
                        yield full_subpath, status, kind, entry
                else:
                    yield path, 'V', entry.kind, entry
        return iter_entries(inv)

    def lock_read(self):
        """Lock the tree for a set of operations.

        :return: A breezy.lock.LogicalLockResult.
        """
        if not self._locked:
            self._repository.lock_read()
            if self._dirstate._lock_token is None:
                self._dirstate.lock_read()
                self._dirstate_locked = True
        self._locked += 1
        return LogicalLockResult(self.unlock)

    def _must_be_locked(self):
        if not self._locked:
            raise errors.ObjectNotLocked(self)

    def path2id(self, path):
        """Return the id for path in this tree."""
        # lookup by path: faster than splitting and walking the inventory.
        if isinstance(path, list):
            if path == []:
                path = [""]
            path = osutils.pathjoin(*path)
        with self.lock_read():
            entry = self._get_entry(path=path)
            if entry == (None, None):
                nested_tree, subpath = self.get_containing_nested_tree(path)
                if nested_tree is not None:
                    return nested_tree.path2id(subpath)
                return None
            return entry[0][2]
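
    # Illustrative lookup (sketch): map a relative path to its file id,
    # getting None back when the path is not versioned in this tree:
    #
    #   file_id = tree.path2id('src/module.py')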

    def unlock(self):
        """Unlock, freeing any cache memory used during the lock."""
        # outside of a lock, the inventory is suspect: release it.
        self._locked -= 1
        if not self._locked:
            self._inventory = None
            self._locked = 0