# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""WorkingTree4 format and implementation.

WorkingTree4 provides the dirstate based working tree logic.

To get a WorkingTree, call bzrdir.open_workingtree() or
WorkingTree.open(dir).
"""
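# A rough usage sketch, not part of the original module: 'some/checkout' is a
# hypothetical local path.  Opening the tree and reading its parents must
# happen under a lock, which is released afterwards.
#
#   tree = WorkingTree.open('some/checkout')
#   tree.lock_read()
#   try:
#       parent_ids = tree.get_parent_ids()
#   finally:
#       tree.unlock()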
from cStringIO import StringIO

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bisect import bisect_left
from copy import deepcopy
import errno
import os
import sys

import bzrlib
from bzrlib import (
    cache_utf8,
    conflicts as _mod_conflicts,
    dirstate,
    errors,
    generate_ids,
    osutils,
    transform,
    )
from bzrlib.transport import get_transport
""")

from bzrlib import symbol_versioning
from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inventory import InventoryEntry, Inventory, ROOT_ID, entry_factory
from bzrlib.lockable_files import LockableFiles, TransportLock
from bzrlib.lockdir import LockDir
import bzrlib.mutabletree
from bzrlib.mutabletree import needs_tree_write_lock
from bzrlib.osutils import (
    isdir,
    pathjoin,
    realpath,
    safe_unicode,
    )
from bzrlib.trace import mutter, note
from bzrlib.transport.local import LocalTransport
from bzrlib.tree import InterTree
from bzrlib.progress import DummyProgress, ProgressPhase
from bzrlib.revision import NULL_REVISION, CURRENT_REVISION
from bzrlib.rio import RioReader, rio_file, Stanza
from bzrlib.symbol_versioning import (deprecated_passed,
    )
from bzrlib.tree import Tree
from bzrlib.workingtree import WorkingTree, WorkingTree3, WorkingTreeFormat3
class WorkingTree4(WorkingTree3):
    """This is the Format 4 working tree.

    This differs from WorkingTree3 by:
     - having a consolidated internal dirstate.
     - not having a regular inventory attribute.

    This is new in bzr TODO FIXME SETMEBEFORE MERGE.
    """

    def __init__(self, basedir,
                 branch,
                 _control_files=None,
                 _format=None,
                 _bzrdir=None):
        """Construct a WorkingTree for basedir.

        If the branch is not supplied, it is opened automatically.
        If the branch is supplied, it must be the branch for this basedir.
        (branch.base is not cross checked, because for remote branches that
        would be meaningless).
        """
        self._format = _format
        self.bzrdir = _bzrdir
        from bzrlib.trace import note, mutter
        assert isinstance(basedir, basestring), \
            "base directory %r is not a string" % basedir
        basedir = safe_unicode(basedir)
        mutter("opening working tree %r", basedir)
        self._branch = branch
        assert isinstance(self.branch, bzrlib.branch.Branch), \
            "branch %r is not a Branch" % self.branch
        self.basedir = realpath(basedir)
        # if branch is at our basedir and is a format 6 or less
        # assume all other formats have their own control files.
        assert isinstance(_control_files, LockableFiles), \
            "_control_files must be a LockableFiles, not %r" % _control_files
        self._control_files = _control_files
        # during a read or write lock these objects are set, and are
        # None the rest of the time.
        self._dirstate = None
        self._inventory = None
    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        state = self.current_dirstate()
        for f, file_id, kind in zip(files, ids, kinds):
            # special case tree root handling.
            if f == '' and self.path2id(f) == ROOT_ID:
                state.set_path_id('', generate_ids.gen_file_id(f))
            if file_id is None:
                file_id = generate_ids.gen_file_id(f)
            # deliberately add the file with no cached stat or sha1
            # - on the first access it will be gathered, and we can
            # always change this once tests are all passing.
            state.add(f, file_id, kind, None, '')
        self._make_dirty(reset_inventory=True)

    def _make_dirty(self, reset_inventory):
        """Make the tree state dirty.

        :param reset_inventory: True if the cached inventory should be removed
            (presuming there is one).
        """
        self._dirty = True
        if reset_inventory and self._inventory is not None:
            self._inventory = None
    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        an active process.

        This will probe the repository for its lock as well.
        """
        # if the dirstate is locked by an active process, reject the break lock
        if self._dirstate is None:
            state = self._current_dirstate()
            if state._lock_token is not None:
                # we already have it locked. sheesh, can't break our own lock.
                raise errors.LockActive(self.basedir)
            # try for a write lock - need permission to get one anyhow
            except errors.LockContention:
                # oslocks fail when a process is still live: fail.
                # TODO: get the locked lockdir info and give to the user to
                #       assist in debugging.
                raise errors.LockActive(self.basedir)
        self._dirstate = None
        self._control_files.break_lock()
        self.branch.break_lock()

    def commit(self, message=None, revprops=None, *args, **kwargs):
        # mark the tree as dirty post commit - commit
        # can change the current versioned list by doing deletes.
        result = WorkingTree3.commit(self, message, revprops, *args, **kwargs)
        self._make_dirty(reset_inventory=True)
        return result
    def current_dirstate(self):
        """Return the current dirstate object.

        This is not part of the tree interface and only exposed for ease of
        testing.

        :raises errors.ObjectNotLocked: when not in a lock.
        """
        if not self._control_files._lock_count:
            raise errors.ObjectNotLocked(self)
        return self._current_dirstate()

    def _current_dirstate(self):
        """Internal function that does not check lock status.

        This is needed for break_lock which also needs the dirstate.
        """
        if self._dirstate is not None:
            return self._dirstate
        local_path = self.bzrdir.get_workingtree_transport(None
            ).local_abspath('dirstate')
        self._dirstate = dirstate.DirState.on_file(local_path)
        return self._dirstate
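    # Illustrative sketch, not original code: current_dirstate() refuses to
    # run outside a lock, so callers typically bracket access like this.
    # 'tree' is assumed to be an already-opened WorkingTree4.
    #
    #   tree.lock_read()
    #   try:
    #       state = tree.current_dirstate()   # ok: a lock is held
    #   finally:
    #       tree.unlock()
    #   tree.current_dirstate()               # raises errors.ObjectNotLocked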
250
def filter_unversioned_files(self, paths):
251
"""Filter out paths that are versioned.
253
:return: set of paths.
255
# TODO: make a generic multi-bisect routine roughly that should list
256
# the paths, then process one half at a time recursively, and feed the
257
# results of each bisect in further still
258
paths = sorted(paths)
260
state = self.current_dirstate()
261
# TODO we want a paths_to_dirblocks helper I think
263
dirname, basename = os.path.split(path.encode('utf8'))
264
_, _, _, path_is_versioned = state._get_block_entry_index(
265
dirname, basename, 0)
266
if not path_is_versioned:
271
"""Write all cached data to disk."""
272
if self._control_files._lock_mode != 'w':
273
raise errors.NotWriteLocked(self)
274
self.current_dirstate().save()
275
self._inventory = None
278
def _generate_inventory(self):
279
"""Create and set self.inventory from the dirstate object.
281
This is relatively expensive: we have to walk the entire dirstate.
282
Ideally we would not, and can deprecate this function.
284
#: uncomment to trap on inventory requests.
285
# import pdb;pdb.set_trace()
286
state = self.current_dirstate()
287
state._read_dirblocks_if_needed()
288
root_key, current_entry = self._get_entry(path='')
289
current_id = root_key[2]
290
assert current_entry[0][0] == 'd' # directory
291
inv = Inventory(root_id=current_id)
292
# Turn some things into local variables
293
minikind_to_kind = dirstate.DirState._minikind_to_kind
294
factory = entry_factory
295
utf8_decode = cache_utf8._utf8_decode
297
# we could do this straight out of the dirstate; it might be fast
298
# and should be profiled - RBC 20070216
299
parent_ies = {'' : inv.root}
300
for block in state._dirblocks[1:]: # skip the root
303
parent_ie = parent_ies[dirname]
305
# all the paths in this block are not versioned in this tree
307
for key, entry in block[1]:
308
minikind, link_or_sha1, size, executable, stat = entry[0]
309
if minikind in ('a', 'r'): # absent, relocated
310
# a parent tree only entry
313
name_unicode = utf8_decode(name)[0]
315
kind = minikind_to_kind[minikind]
316
inv_entry = factory[kind](file_id, name_unicode,
319
# not strictly needed: working tree
320
#entry.executable = executable
321
#entry.text_size = size
322
#entry.text_sha1 = sha1
324
elif kind == 'directory':
325
# add this entry to the parent map.
326
parent_ies[(dirname + '/' + name).strip('/')] = inv_entry
327
# These checks cost us around 40ms on a 55k entry tree
328
assert file_id not in inv_byid, ('file_id %s already in'
329
' inventory as %s' % (file_id, inv_byid[file_id]))
330
assert name_unicode not in parent_ie.children
331
inv_byid[file_id] = inv_entry
332
parent_ie.children[name_unicode] = inv_entry
333
self._inventory = inv
335
def _get_entry(self, file_id=None, path=None):
336
"""Get the dirstate row for file_id or path.
338
If either file_id or path is supplied, it is used as the key to lookup.
339
If both are supplied, the fastest lookup is used, and an error is
340
raised if they do not both point at the same row.
342
:param file_id: An optional unicode file_id to be looked up.
343
:param path: An optional unicode path to be looked up.
344
:return: The dirstate row tuple for path/file_id, or (None, None)
346
if file_id is None and path is None:
347
raise errors.BzrError('must supply file_id or path')
348
state = self.current_dirstate()
350
path = path.encode('utf8')
351
return state._get_entry(0, fileid_utf8=file_id, path_utf8=path)
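# Illustrative sketch, not original code: _get_entry returns a
# (key, tree_details) pair, where key is (dirname, basename, file-id) in
# utf8, or (None, None) when nothing matches.  'tree' is assumed to be a
# locked WorkingTree4.
#
#   key, details = tree._get_entry(path=u'dir/file.txt')
#   if key is None:
#       pass  # the path is not versioned
#   else:
#       file_id = key[2]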
353
def get_file_sha1(self, file_id, path=None, stat_value=None):
354
# check file id is valid unconditionally.
355
entry = self._get_entry(file_id=file_id, path=path)
356
assert entry[0] is not None, 'what error should this raise'
358
# if row stat is valid, use cached sha1, else, get a new sha1.
360
path = pathjoin(entry[0][0], entry[0][1]).decode('utf8')
362
file_abspath = self.abspath(path)
363
state = self.current_dirstate()
364
link_or_sha1 = state.update_entry(entry, file_abspath,
365
stat_value=stat_value)
366
if entry[1][0][0] == 'f':
370
def _get_inventory(self):
371
"""Get the inventory for the tree. This is only valid within a lock."""
372
if self._inventory is not None:
373
return self._inventory
374
self._generate_inventory()
375
return self._inventory
377
inventory = property(_get_inventory,
378
doc="Inventory of this Tree")
381
def get_parent_ids(self):
382
"""See Tree.get_parent_ids.
384
This implementation requests the ids list from the dirstate file.
386
return self.current_dirstate().get_parent_ids()
389
def get_root_id(self):
390
"""Return the id of this trees root"""
391
return self._get_entry(path='')[0][2]
393
def has_id(self, file_id):
394
state = self.current_dirstate()
395
file_id = osutils.safe_file_id(file_id)
396
row, parents = self._get_entry(file_id=file_id)
399
return osutils.lexists(pathjoin(
400
self.basedir, row[0].decode('utf8'), row[1].decode('utf8')))
403
def id2path(self, file_id):
404
file_id = osutils.safe_file_id(file_id)
405
state = self.current_dirstate()
406
entry = self._get_entry(file_id=file_id)
407
if entry == (None, None):
409
path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
410
return path_utf8.decode('utf8')
414
"""Iterate through file_ids for this tree.
416
file_ids are in a WorkingTree if they are in the working inventory
417
and the working file exists.
420
for key, tree_details in self.current_dirstate()._iter_entries():
421
if tree_details[0][0] in ('a', 'r'): # absent, relocated
422
# not relevant to the working tree
424
path = pathjoin(self.basedir, key[0].decode('utf8'), key[1].decode('utf8'))
425
if osutils.lexists(path):
426
result.append(key[2])
430
def _last_revision(self):
431
"""See Mutable.last_revision."""
432
parent_ids = self.current_dirstate().get_parent_ids()
439
"""See Branch.lock_read, and WorkingTree.unlock."""
440
self.branch.lock_read()
442
self._control_files.lock_read()
444
state = self.current_dirstate()
445
if not state._lock_token:
448
self._control_files.unlock()
454
def _lock_self_write(self):
455
"""This should be called after the branch is locked."""
457
self._control_files.lock_write()
459
state = self.current_dirstate()
460
if not state._lock_token:
463
self._control_files.unlock()
469
def lock_tree_write(self):
470
"""See MutableTree.lock_tree_write, and WorkingTree.unlock."""
471
self.branch.lock_read()
472
self._lock_self_write()
474
def lock_write(self):
475
"""See MutableTree.lock_write, and WorkingTree.unlock."""
476
self.branch.lock_write()
477
self._lock_self_write()
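# Illustrative sketch, not original code: lock_write locks the branch first
# and then the tree itself; every lock call must be paired with unlock, so
# callers normally use try/finally.  'tree' is assumed to be a WorkingTree4.
#
#   tree.lock_write()
#   try:
#       tree.flush()   # writing cached state requires the write lock
#   finally:
#       tree.unlock()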
479
@needs_tree_write_lock
480
def move(self, from_paths, to_dir, after=False):
481
"""See WorkingTree.move()."""
486
state = self.current_dirstate()
488
assert not isinstance(from_paths, basestring)
489
to_dir_utf8 = to_dir.encode('utf8')
490
to_entry_dirname, to_basename = os.path.split(to_dir_utf8)
491
id_index = state._get_id_index()
492
# check destination directory
493
# get the details for it
494
to_entry_block_index, to_entry_entry_index, dir_present, entry_present = \
495
state._get_block_entry_index(to_entry_dirname, to_basename, 0)
496
if not entry_present:
497
raise errors.BzrMoveFailedError('', to_dir,
498
errors.NotVersionedError(to_dir))
499
to_entry = state._dirblocks[to_entry_block_index][1][to_entry_entry_index]
500
# get a handle on the block itself.
501
to_block_index = state._ensure_block(
502
to_entry_block_index, to_entry_entry_index, to_dir_utf8)
503
to_block = state._dirblocks[to_block_index]
504
to_abs = self.abspath(to_dir)
505
if not isdir(to_abs):
506
raise errors.BzrMoveFailedError('',to_dir,
507
errors.NotADirectory(to_abs))
509
if to_entry[1][0][0] != 'd':
510
raise errors.BzrMoveFailedError('',to_dir,
511
errors.NotADirectory(to_abs))
513
if self._inventory is not None:
514
update_inventory = True
516
to_dir_ie = inv[to_dir_id]
517
to_dir_id = to_entry[0][2]
519
update_inventory = False
522
def move_one(old_entry, from_path_utf8, minikind, executable,
523
fingerprint, packed_stat, size,
524
to_block, to_key, to_path_utf8):
525
state._make_absent(old_entry)
526
from_key = old_entry[0]
528
lambda:state.update_minimal(from_key,
530
executable=executable,
531
fingerprint=fingerprint,
532
packed_stat=packed_stat,
534
path_utf8=from_path_utf8))
535
state.update_minimal(to_key,
537
executable=executable,
538
fingerprint=fingerprint,
539
packed_stat=packed_stat,
541
path_utf8=to_path_utf8)
542
added_entry_index, _ = state._find_entry_index(to_key, to_block[1])
543
new_entry = to_block[1][added_entry_index]
544
rollbacks.append(lambda:state._make_absent(new_entry))
546
# create rename entries and tuples
547
for from_rel in from_paths:
548
# from_rel is 'pathinroot/foo/bar'
549
from_rel_utf8 = from_rel.encode('utf8')
550
from_dirname, from_tail = osutils.split(from_rel)
551
from_dirname, from_tail_utf8 = osutils.split(from_rel_utf8)
552
from_entry = self._get_entry(path=from_rel)
553
if from_entry == (None, None):
554
raise errors.BzrMoveFailedError(from_rel,to_dir,
555
errors.NotVersionedError(path=str(from_rel)))
557
from_id = from_entry[0][2]
558
to_rel = pathjoin(to_dir, from_tail)
559
to_rel_utf8 = pathjoin(to_dir_utf8, from_tail_utf8)
560
item_to_entry = self._get_entry(path=to_rel)
561
if item_to_entry != (None, None):
562
raise errors.BzrMoveFailedError(from_rel, to_rel,
563
"Target is already versioned.")
565
if from_rel == to_rel:
566
raise errors.BzrMoveFailedError(from_rel, to_rel,
567
"Source and target are identical.")
569
from_missing = not self.has_filename(from_rel)
570
to_missing = not self.has_filename(to_rel)
577
raise errors.BzrMoveFailedError(from_rel, to_rel,
578
errors.NoSuchFile(path=to_rel,
579
extra="New file has not been created yet"))
581
# neither path exists
582
raise errors.BzrRenameFailedError(from_rel, to_rel,
583
errors.PathsDoNotExist(paths=(from_rel, to_rel)))
585
if from_missing: # implicitly just update our path mapping
588
raise errors.RenameFailedFilesExist(from_rel, to_rel,
589
extra="(Use --after to update the Bazaar id)")
592
def rollback_rename():
593
"""A single rename has failed, roll it back."""
595
for rollback in reversed(rollbacks):
599
import pdb;pdb.set_trace()
600
exc_info = sys.exc_info()
602
raise exc_info[0], exc_info[1], exc_info[2]
# perform the disk move first - it's the most likely failure point.
606
from_rel_abs = self.abspath(from_rel)
607
to_rel_abs = self.abspath(to_rel)
609
osutils.rename(from_rel_abs, to_rel_abs)
611
raise errors.BzrMoveFailedError(from_rel, to_rel, e[1])
612
rollbacks.append(lambda: osutils.rename(to_rel_abs, from_rel_abs))
# perform the rename in the inventory next if needed: it's easy
618
from_entry = inv[from_id]
619
current_parent = from_entry.parent_id
620
inv.rename(from_id, to_dir_id, from_tail)
622
lambda: inv.rename(from_id, current_parent, from_tail))
623
# finally do the rename in the dirstate, which is a little
624
# tricky to rollback, but least likely to need it.
625
old_block_index, old_entry_index, dir_present, file_present = \
626
state._get_block_entry_index(from_dirname, from_tail_utf8, 0)
627
old_block = state._dirblocks[old_block_index][1]
628
old_entry = old_block[old_entry_index]
629
from_key, old_entry_details = old_entry
630
cur_details = old_entry_details[0]
632
to_key = ((to_block[0],) + from_key[1:3])
633
minikind = cur_details[0]
634
move_one(old_entry, from_path_utf8=from_rel_utf8,
636
executable=cur_details[3],
637
fingerprint=cur_details[1],
638
packed_stat=cur_details[4],
642
to_path_utf8=to_rel_utf8)
645
def update_dirblock(from_dir, to_key, to_dir_utf8):
646
"""all entries in this block need updating.
648
TODO: This is pretty ugly, and doesn't support
649
reverting, but it works.
651
assert from_dir != '', "renaming root not supported"
652
from_key = (from_dir, '')
653
from_block_idx, present = \
654
state._find_block_index_from_key(from_key)
656
# This is the old record, if it isn't present, then
657
# there is theoretically nothing to update.
658
# (Unless it isn't present because of lazy loading,
659
# but we don't do that yet)
661
from_block = state._dirblocks[from_block_idx]
662
to_block_index, to_entry_index, _, _ = \
663
state._get_block_entry_index(to_key[0], to_key[1], 0)
664
to_block_index = state._ensure_block(
665
to_block_index, to_entry_index, to_dir_utf8)
666
to_block = state._dirblocks[to_block_index]
667
for entry in from_block[1]:
668
assert entry[0][0] == from_dir
669
cur_details = entry[1][0]
670
to_key = (to_dir_utf8, entry[0][1], entry[0][2])
671
from_path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
672
to_path_utf8 = osutils.pathjoin(to_dir_utf8, entry[0][1])
673
minikind = cur_details[0]
674
move_one(entry, from_path_utf8=from_path_utf8,
676
executable=cur_details[3],
677
fingerprint=cur_details[1],
678
packed_stat=cur_details[4],
682
to_path_utf8=to_rel_utf8)
684
# We need to move all the children of this
686
update_dirblock(from_path_utf8, to_key,
688
update_dirblock(from_rel_utf8, to_key, to_rel_utf8)
692
result.append((from_rel, to_rel))
693
state._dirblock_state = dirstate.DirState.IN_MEMORY_MODIFIED
694
self._make_dirty(reset_inventory=False)
699
"""Initialize the state in this tree to be a new tree."""
703
def path2id(self, path):
704
"""Return the id for path in this tree."""
705
path = path.strip('/')
706
entry = self._get_entry(path=path)
707
if entry == (None, None):
711
def paths2ids(self, paths, trees=[], require_versioned=True):
712
"""See Tree.paths2ids().
714
This specialisation fast-paths the case where all the trees are in the
719
parents = self.get_parent_ids()
721
if not (isinstance(tree, DirStateRevisionTree) and tree._revision_id in
723
return super(WorkingTree4, self).paths2ids(paths, trees, require_versioned)
724
search_indexes = [0] + [1 + parents.index(tree._revision_id) for tree in trees]
725
# -- make all paths utf8 --
728
paths_utf8.add(path.encode('utf8'))
730
# -- paths is now a utf8 path set --
731
# -- get the state object and prepare it.
732
state = self.current_dirstate()
733
if False and (state._dirblock_state == dirstate.DirState.NOT_IN_MEMORY
734
and '' not in paths):
735
paths2ids = self._paths2ids_using_bisect
737
paths2ids = self._paths2ids_in_memory
738
return paths2ids(paths, search_indexes,
739
require_versioned=require_versioned)
741
def _paths2ids_in_memory(self, paths, search_indexes,
742
require_versioned=True):
743
state = self.current_dirstate()
744
state._read_dirblocks_if_needed()
745
def _entries_for_path(path):
746
"""Return a list with all the entries that match path for all ids.
748
dirname, basename = os.path.split(path)
749
key = (dirname, basename, '')
750
block_index, present = state._find_block_index_from_key(key)
752
# the block which should contain path is absent.
755
block = state._dirblocks[block_index][1]
756
entry_index, _ = state._find_entry_index(key, block)
757
# we may need to look at multiple entries at this path: walk while the paths match.
758
while (entry_index < len(block) and
759
block[entry_index][0][0:2] == key[0:2]):
760
result.append(block[entry_index])
763
if require_versioned:
764
# -- check all supplied paths are versioned in a search tree. --
767
path_entries = _entries_for_path(path)
769
# this specified path is not present at all: error
770
all_versioned = False
772
found_versioned = False
773
# for each id at this path
774
for entry in path_entries:
776
for index in search_indexes:
777
if entry[1][index][0] != 'a': # absent
778
found_versioned = True
779
# all good: found a versioned cell
781
if not found_versioned:
# every index was 'absent' for every id at this path.
784
all_versioned = False
786
if not all_versioned:
787
raise errors.PathsNotVersionedError(paths)
788
# -- remove redundancy in supplied paths to prevent over-scanning --
791
other_paths = paths.difference(set([path]))
792
if not osutils.is_inside_any(other_paths, path):
793
# this is a top level path, we must check it.
794
search_paths.add(path)
# for all search_indexes in each path at or under each element of
# search_paths, if the detail is relocated: add the id, and add the
# relocated path as one to search if it's not searched already. If the
# detail is not relocated, add the id.
800
searched_paths = set()
802
def _process_entry(entry):
803
"""Look at search_indexes within entry.
805
If a specific tree's details are relocated, add the relocation
806
target to search_paths if not searched already. If it is absent, do
807
nothing. Otherwise add the id to found_ids.
809
for index in search_indexes:
810
if entry[1][index][0] == 'r': # relocated
811
if not osutils.is_inside_any(searched_paths, entry[1][index][1]):
812
search_paths.add(entry[1][index][1])
813
elif entry[1][index][0] != 'a': # absent
814
found_ids.add(entry[0][2])
816
current_root = search_paths.pop()
817
searched_paths.add(current_root)
818
# process the entries for this containing directory: the rest will be
819
# found by their parents recursively.
820
root_entries = _entries_for_path(current_root)
822
# this specified path is not present at all, skip it.
824
for entry in root_entries:
825
_process_entry(entry)
826
initial_key = (current_root, '', '')
827
block_index, _ = state._find_block_index_from_key(initial_key)
828
while (block_index < len(state._dirblocks) and
829
osutils.is_inside(current_root, state._dirblocks[block_index][0])):
830
for entry in state._dirblocks[block_index][1]:
831
_process_entry(entry)
835
def _paths2ids_using_bisect(self, paths, search_indexes,
836
require_versioned=True):
837
state = self.current_dirstate()
840
split_paths = sorted(osutils.split(p) for p in paths)
841
found = state._bisect_recursive(split_paths)
843
if require_versioned:
844
found_dir_names = set(dir_name_id[:2] for dir_name_id in found)
845
for dir_name in split_paths:
846
if dir_name not in found_dir_names:
847
raise errors.PathsNotVersionedError(paths)
849
for dir_name_id, trees_info in found.iteritems():
850
for index in search_indexes:
851
if trees_info[index][0] not in ('r', 'a'):
852
found_ids.add(dir_name_id[2])
855
def read_working_inventory(self):
856
"""Read the working inventory.
858
This is a meaningless operation for dirstate, but we obey it anyhow.
860
return self.inventory
863
def revision_tree(self, revision_id):
864
"""See Tree.revision_tree.
866
WorkingTree4 supplies revision_trees for any basis tree.
868
revision_id = osutils.safe_revision_id(revision_id)
869
dirstate = self.current_dirstate()
870
parent_ids = dirstate.get_parent_ids()
871
if revision_id not in parent_ids:
872
raise errors.NoSuchRevisionInTree(self, revision_id)
873
if revision_id in dirstate.get_ghosts():
874
raise errors.NoSuchRevisionInTree(self, revision_id)
875
return DirStateRevisionTree(dirstate, revision_id,
876
self.branch.repository)
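# Illustrative sketch, not original code: revision_tree only answers for
# revisions that are non-ghost parents of the working tree; anything else
# raises NoSuchRevisionInTree and callers fall back to the repository.
# 'tree' is assumed to be a locked WorkingTree4.
#
#   parent_ids = tree.get_parent_ids()
#   if parent_ids:
#       basis = tree.revision_tree(parent_ids[0])   # a DirStateRevisionTree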
878
@needs_tree_write_lock
879
def set_last_revision(self, new_revision):
880
"""Change the last revision in the working tree."""
881
new_revision = osutils.safe_revision_id(new_revision)
882
parents = self.get_parent_ids()
883
if new_revision in (NULL_REVISION, None):
884
assert len(parents) < 2, (
885
"setting the last parent to none with a pending merge is "
887
self.set_parent_ids([])
889
self.set_parent_ids([new_revision] + parents[1:],
890
allow_leftmost_as_ghost=True)
892
@needs_tree_write_lock
893
def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
894
"""Set the parent ids to revision_ids.
896
See also set_parent_trees. This api will try to retrieve the tree data
897
for each element of revision_ids from the trees repository. If you have
898
tree data already available, it is more efficient to use
899
set_parent_trees rather than set_parent_ids. set_parent_ids is however
900
an easier API to use.
902
:param revision_ids: The revision_ids to set as the parent ids of this
903
working tree. Any of these may be ghosts.
905
revision_ids = [osutils.safe_revision_id(r) for r in revision_ids]
907
for revision_id in revision_ids:
909
revtree = self.branch.repository.revision_tree(revision_id)
910
# TODO: jam 20070213 KnitVersionedFile raises
911
# RevisionNotPresent rather than NoSuchRevision if a
912
# given revision_id is not present. Should Repository be
913
# catching it and re-raising NoSuchRevision?
914
except (errors.NoSuchRevision, errors.RevisionNotPresent):
916
trees.append((revision_id, revtree))
917
self.set_parent_trees(trees,
918
allow_leftmost_as_ghost=allow_leftmost_as_ghost)
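# Illustrative sketch, not original code: set_parent_ids looks each id up in
# the branch repository, so when the caller already has the parent trees it
# is cheaper to call set_parent_trees directly.  'tree', 'rev_id' and
# 'revtree' are assumed to exist.
#
#   tree.set_parent_ids([rev_id])                # fetches the tree data
#   tree.set_parent_trees([(rev_id, revtree)])   # reuses 'revtree' directly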
920
@needs_tree_write_lock
921
def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
922
"""Set the parents of the working tree.
924
:param parents_list: A list of (revision_id, tree) tuples.
925
If tree is None, then that element is treated as an unreachable
926
parent tree - i.e. a ghost.
928
dirstate = self.current_dirstate()
929
if len(parents_list) > 0:
930
if not allow_leftmost_as_ghost and parents_list[0][1] is None:
931
raise errors.GhostRevisionUnusableHere(parents_list[0][0])
934
# convert absent trees to the null tree, which we convert back to
936
for rev_id, tree in parents_list:
937
rev_id = osutils.safe_revision_id(rev_id)
939
real_trees.append((rev_id, tree))
941
real_trees.append((rev_id,
942
self.branch.repository.revision_tree(None)))
943
ghosts.append(rev_id)
944
dirstate.set_parent_trees(real_trees, ghosts=ghosts)
945
self._make_dirty(reset_inventory=False)
947
def _set_root_id(self, file_id):
948
"""See WorkingTree.set_root_id."""
949
state = self.current_dirstate()
950
state.set_path_id('', file_id)
951
if state._dirblock_state == dirstate.DirState.IN_MEMORY_MODIFIED:
952
self._make_dirty(reset_inventory=True)
"""Unlocking a format 4 tree requires writing the entire dirstate."""
956
if self._control_files._lock_count == 1:
957
# eventually we should do signature checking during read locks for
959
if self._control_files._lock_mode == 'w':
962
if self._dirstate is not None:
963
# This is a no-op if there are no modifications.
964
self._dirstate.save()
965
self._dirstate.unlock()
966
# TODO: jam 20070301 We shouldn't have to wipe the dirstate at this
967
# point. Instead, it could check if the header has been
968
# modified when it is locked, and if not, it can hang on to
969
# the data it has in memory.
970
self._dirstate = None
971
self._inventory = None
972
# reverse order of locking.
974
return self._control_files.unlock()
978
@needs_tree_write_lock
979
def unversion(self, file_ids):
980
"""Remove the file ids in file_ids from the current versioned set.
982
When a file_id is unversioned, all of its children are automatically
985
:param file_ids: The file ids to stop versioning.
986
:raises: NoSuchId if any fileid is not currently versioned.
990
state = self.current_dirstate()
991
state._read_dirblocks_if_needed()
992
ids_to_unversion = set()
993
for file_id in file_ids:
994
ids_to_unversion.add(osutils.safe_file_id(file_id))
995
paths_to_unversion = set()
997
# check if the root is to be unversioned, if so, assert for now.
998
# walk the state marking unversioned things as absent.
999
# if there are any un-unversioned ids at the end, raise
1000
for key, details in state._dirblocks[0][1]:
1001
if (details[0][0] not in ('a', 'r') and # absent or relocated
1002
key[2] in ids_to_unversion):
1003
# I haven't written the code to unversion / yet - it should be
1005
raise errors.BzrError('Unversioning the / is not currently supported')
1007
while block_index < len(state._dirblocks):
1008
# process one directory at a time.
1009
block = state._dirblocks[block_index]
1010
# first check: is the path one to remove - it or its children
1011
delete_block = False
1012
for path in paths_to_unversion:
1013
if (block[0].startswith(path) and
1014
(len(block[0]) == len(path) or
1015
block[0][len(path)] == '/')):
# this entire block should be deleted - it's the block for a
1017
# path to unversion; or the child of one
1020
# TODO: trim paths_to_unversion as we pass by paths
1022
# this block is to be deleted: process it.
1023
# TODO: we can special case the no-parents case and
1024
# just forget the whole block.
1026
while entry_index < len(block[1]):
1027
# Mark this file id as having been removed
1028
ids_to_unversion.discard(block[1][entry_index][0][2])
1029
if not state._make_absent(block[1][entry_index]):
# go to the next block. (At the moment we don't delete empty
1036
while entry_index < len(block[1]):
1037
entry = block[1][entry_index]
1038
if (entry[1][0][0] in ('a', 'r') or # absent, relocated
1039
# ^ some parent row.
1040
entry[0][2] not in ids_to_unversion):
1041
# ^ not an id to unversion
1044
if entry[1][0][0] == 'd':
1045
paths_to_unversion.add(pathjoin(entry[0][0], entry[0][1]))
1046
if not state._make_absent(entry):
1048
# we have unversioned this id
1049
ids_to_unversion.remove(entry[0][2])
1051
if ids_to_unversion:
1052
raise errors.NoSuchId(self, iter(ids_to_unversion).next())
1053
self._make_dirty(reset_inventory=False)
1054
# have to change the legacy inventory too.
1055
if self._inventory is not None:
1056
for file_id in file_ids:
1057
self._inventory.remove_recursive_id(file_id)
1059
@needs_tree_write_lock
1060
def _write_inventory(self, inv):
1061
"""Write inventory as the current inventory."""
1062
assert not self._dirty, "attempting to write an inventory when the dirstate is dirty will cause data loss"
1063
self.current_dirstate().set_state_from_inventory(inv)
1064
self._make_dirty(reset_inventory=False)
1065
if self._inventory is not None:
1066
self._inventory = inv
1071
class WorkingTreeFormat4(WorkingTreeFormat3):
1072
"""The first consolidated dirstate working tree format.
1075
- exists within a metadir controlling .bzr
1076
- includes an explicit version marker for the workingtree control
1077
files, separate from the BzrDir format
1078
- modifies the hash cache format
1079
- is new in bzr TODO FIXME SETBEFOREMERGE
1080
- uses a LockDir to guard access to it.
1083
def get_format_string(self):
1084
"""See WorkingTreeFormat.get_format_string()."""
1085
return "Bazaar Working Tree format 4\n"
1087
def get_format_description(self):
1088
"""See WorkingTreeFormat.get_format_description()."""
1089
return "Working tree format 4"
1091
def initialize(self, a_bzrdir, revision_id=None):
1092
"""See WorkingTreeFormat.initialize().
1094
revision_id allows creating a working tree at a different
1095
revision than the branch is at.
1097
revision_id = osutils.safe_revision_id(revision_id)
1098
if not isinstance(a_bzrdir.transport, LocalTransport):
1099
raise errors.NotLocalUrl(a_bzrdir.transport.base)
1100
transport = a_bzrdir.get_workingtree_transport(self)
1101
control_files = self._open_control_files(a_bzrdir)
1102
control_files.create_lock()
1103
control_files.lock_write()
1104
control_files.put_utf8('format', self.get_format_string())
1105
branch = a_bzrdir.open_branch()
1106
if revision_id is None:
1107
revision_id = branch.last_revision()
1108
local_path = transport.local_abspath('dirstate')
1109
state = dirstate.DirState.initialize(local_path)
1111
wt = WorkingTree4(a_bzrdir.root_transport.local_abspath('.'),
1115
_control_files=control_files)
1117
wt.lock_tree_write()
1119
wt.set_last_revision(revision_id)
1121
basis = wt.basis_tree()
1123
transform.build_tree(basis, wt)
1126
control_files.unlock()
1131
def _open(self, a_bzrdir, control_files):
1132
"""Open the tree itself.
1134
:param a_bzrdir: the dir for the tree.
1135
:param control_files: the control files for the tree.
1137
return WorkingTree4(a_bzrdir.root_transport.local_abspath('.'),
1138
branch=a_bzrdir.open_branch(),
1141
_control_files=control_files)
1144
class DirStateRevisionTree(Tree):
1145
"""A revision tree pulling the inventory from a dirstate."""
1147
def __init__(self, dirstate, revision_id, repository):
1148
self._dirstate = dirstate
1149
self._revision_id = osutils.safe_revision_id(revision_id)
1150
self._repository = repository
1151
self._inventory = None
1153
self._dirstate_locked = False
1155
def annotate_iter(self, file_id):
1156
"""See Tree.annotate_iter"""
1157
w = self._repository.weave_store.get_weave(file_id,
1158
self._repository.get_transaction())
1159
return w.annotate_iter(self.inventory[file_id].revision)
1161
def _comparison_data(self, entry, path):
1162
"""See Tree._comparison_data."""
1164
return None, False, None
1165
# trust the entry as RevisionTree does, but this may not be
1166
# sensible: the entry might not have come from us?
1167
return entry.kind, entry.executable, None
1169
def _file_size(self, entry, stat_value):
1170
return entry.text_size
1172
def filter_unversioned_files(self, paths):
1173
"""Filter out paths that are not versioned.
1175
:return: set of paths.
1177
pred = self.has_filename
1178
return set((p for p in paths if not pred(p)))
1180
def _get_parent_index(self):
1181
"""Return the index in the dirstate referenced by this tree."""
1182
return self._dirstate.get_parent_ids().index(self._revision_id) + 1
1184
def _get_entry(self, file_id=None, path=None):
1185
"""Get the dirstate row for file_id or path.
1187
If either file_id or path is supplied, it is used as the key to lookup.
1188
If both are supplied, the fastest lookup is used, and an error is
1189
raised if they do not both point at the same row.
1191
:param file_id: An optional unicode file_id to be looked up.
1192
:param path: An optional unicode path to be looked up.
1193
:return: The dirstate row tuple for path/file_id, or (None, None)
1195
if file_id is None and path is None:
1196
raise errors.BzrError('must supply file_id or path')
1197
file_id = osutils.safe_file_id(file_id)
1198
if path is not None:
1199
path = path.encode('utf8')
1200
parent_index = self._get_parent_index()
1201
return self._dirstate._get_entry(parent_index, fileid_utf8=file_id, path_utf8=path)
1203
def _generate_inventory(self):
1204
"""Create and set self.inventory from the dirstate object.
1206
This is relatively expensive: we have to walk the entire dirstate.
1207
Ideally we would not, and instead would """
1208
assert self._locked, 'cannot generate inventory of an unlocked '\
1209
'dirstate revision tree'
1210
# separate call for profiling - makes it clear where the costs are.
1211
self._dirstate._read_dirblocks_if_needed()
1212
assert self._revision_id in self._dirstate.get_parent_ids(), \
1213
'parent %s has disappeared from %s' % (
1214
self._revision_id, self._dirstate.get_parent_ids())
1215
parent_index = self._dirstate.get_parent_ids().index(self._revision_id) + 1
1216
# This is identical now to the WorkingTree _generate_inventory except
1217
# for the tree index use.
1218
root_key, current_entry = self._dirstate._get_entry(parent_index, path_utf8='')
1219
current_id = root_key[2]
1220
assert current_entry[parent_index][0] == 'd'
1221
inv = Inventory(root_id=current_id, revision_id=self._revision_id)
1222
inv.root.revision = current_entry[parent_index][4]
1223
# Turn some things into local variables
1224
minikind_to_kind = dirstate.DirState._minikind_to_kind
1225
factory = entry_factory
1226
utf8_decode = cache_utf8._utf8_decode
1227
inv_byid = inv._byid
1228
# we could do this straight out of the dirstate; it might be fast
1229
# and should be profiled - RBC 20070216
1230
parent_ies = {'' : inv.root}
1231
for block in self._dirstate._dirblocks[1:]: #skip root
1234
parent_ie = parent_ies[dirname]
1236
# all the paths in this block are not versioned in this tree
1238
for key, entry in block[1]:
1239
minikind, link_or_sha1, size, executable, revid = entry[parent_index]
1240
if minikind in ('a', 'r'): # absent, relocated
1244
name_unicode = utf8_decode(name)[0]
1246
kind = minikind_to_kind[minikind]
1247
inv_entry = factory[kind](file_id, name_unicode,
1249
inv_entry.revision = revid
1251
inv_entry.executable = executable
1252
inv_entry.text_size = size
1253
inv_entry.text_sha1 = link_or_sha1
1254
elif kind == 'directory':
1255
parent_ies[(dirname + '/' + name).strip('/')] = inv_entry
1256
elif kind == 'symlink':
1257
inv_entry.executable = False
1258
inv_entry.text_size = size
1259
inv_entry.symlink_target = utf8_decode(link_or_sha1)[0]
1261
raise Exception, kind
1262
# These checks cost us around 40ms on a 55k entry tree
1263
assert file_id not in inv_byid
1264
assert name_unicode not in parent_ie.children
1265
inv_byid[file_id] = inv_entry
1266
parent_ie.children[name_unicode] = inv_entry
1267
self._inventory = inv
1269
def get_file_mtime(self, file_id, path=None):
1270
"""Return the modification time for this record.
1272
We return the timestamp of the last-changed revision.
1274
# Make sure the file exists
1275
entry = self._get_entry(file_id, path=path)
1276
if entry == (None, None): # do we raise?
1278
parent_index = self._get_parent_index()
1279
last_changed_revision = entry[1][parent_index][4]
1280
return self._repository.get_revision(last_changed_revision).timestamp
1282
def get_file_sha1(self, file_id, path=None, stat_value=None):
1283
entry = self._get_entry(file_id=file_id, path=path)
1284
parent_index = self._get_parent_index()
1285
parent_details = entry[1][parent_index]
1286
if parent_details[0] == 'f':
1287
return parent_details[1]
1290
def get_file(self, file_id):
1291
return StringIO(self.get_file_text(file_id))
1293
def get_file_lines(self, file_id):
1294
ie = self.inventory[file_id]
1295
return self._repository.weave_store.get_weave(file_id,
1296
self._repository.get_transaction()).get_lines(ie.revision)
1298
def get_file_size(self, file_id):
1299
return self.inventory[file_id].text_size
1301
def get_file_text(self, file_id):
1302
return ''.join(self.get_file_lines(file_id))
1304
def get_symlink_target(self, file_id):
1305
entry = self._get_entry(file_id=file_id)
1306
parent_index = self._get_parent_index()
1307
if entry[1][parent_index][0] != 'l':
1310
# At present, none of the tree implementations supports non-ascii
1311
# symlink targets. So we will just assume that the dirstate path is
1313
return entry[1][parent_index][1]
1315
def get_revision_id(self):
1316
"""Return the revision id for this tree."""
1317
return self._revision_id
1319
def _get_inventory(self):
1320
if self._inventory is not None:
1321
return self._inventory
1322
self._generate_inventory()
1323
return self._inventory
1325
inventory = property(_get_inventory,
1326
doc="Inventory of this Tree")
1328
def get_parent_ids(self):
1329
"""The parents of a tree in the dirstate are not cached."""
1330
return self._repository.get_revision(self._revision_id).parent_ids
1332
def has_filename(self, filename):
1333
return bool(self.path2id(filename))
1335
def kind(self, file_id):
1336
return self.inventory[file_id].kind
1338
def is_executable(self, file_id, path=None):
1339
ie = self.inventory[file_id]
1340
if ie.kind != "file":
1342
return ie.executable
1344
def list_files(self, include_root=False):
1345
# We use a standard implementation, because DirStateRevisionTree is
1346
# dealing with one of the parents of the current state
1347
inv = self._get_inventory()
1348
entries = inv.iter_entries()
1349
if self.inventory.root is not None and not include_root:
1351
for path, entry in entries:
1352
yield path, 'V', entry.kind, entry.file_id, entry
1354
def lock_read(self):
1355
"""Lock the tree for a set of operations."""
1356
if not self._locked:
1357
self._repository.lock_read()
1358
if self._dirstate._lock_token is None:
1359
self._dirstate.lock_read()
1360
self._dirstate_locked = True
1364
def path2id(self, path):
1365
"""Return the id for path in this tree."""
# lookup by path: faster than splitting and walking the inventory.
1367
entry = self._get_entry(path=path)
1368
if entry == (None, None):
1373
"""Unlock, freeing any cache memory used during the lock."""
1374
# outside of a lock, the inventory is suspect: release it.
1376
if not self._locked:
1377
self._inventory = None
1379
if self._dirstate_locked:
1380
self._dirstate.unlock()
1381
self._dirstate_locked = False
1382
self._repository.unlock()
1384
def walkdirs(self, prefix=""):
1385
# TODO: jam 20070215 This is the cheap way by cheating and using the
1386
# RevisionTree implementation.
1387
# This should be cleaned up to use the much faster Dirstate code
1388
# This is a little tricky, though, because the dirstate is
1389
# indexed by current path, not by parent path.
1390
# So for now, we just build up the parent inventory, and extract
1391
# it the same way RevisionTree does.
1392
_directory = 'directory'
1393
inv = self._get_inventory()
1394
top_id = inv.path2id(prefix)
1398
pending = [(prefix, top_id)]
1401
relpath, file_id = pending.pop()
1402
# 0 - relpath, 1- file-id
1404
relroot = relpath + '/'
1407
# FIXME: stash the node in pending
1408
entry = inv[file_id]
1409
for name, child in entry.sorted_children():
1410
toppath = relroot + name
1411
dirblock.append((toppath, name, child.kind, None,
1412
child.file_id, child.kind
1414
yield (relpath, entry.file_id), dirblock
1415
# push the user specified dirs from dirblock
1416
for dir in reversed(dirblock):
1417
if dir[2] == _directory:
1418
pending.append((dir[0], dir[4]))
1421
class InterDirStateTree(InterTree):
1422
"""Fast path optimiser for changes_from with dirstate trees."""
1424
def __init__(self, source, target):
1425
super(InterDirStateTree, self).__init__(source, target)
1426
if not InterDirStateTree.is_compatible(source, target):
1427
raise Exception, "invalid source %r and target %r" % (source, target)
1430
def make_source_parent_tree(source, target):
1431
"""Change the source tree into a parent of the target."""
1432
revid = source.commit('record tree')
1433
target.branch.repository.fetch(source.branch.repository, revid)
1434
target.set_parent_ids([revid])
1435
return target.basis_tree(), target
1437
_matching_from_tree_format = WorkingTreeFormat4()
1438
_matching_to_tree_format = WorkingTreeFormat4()
1439
_test_mutable_trees_to_test_trees = make_source_parent_tree
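# Illustrative sketch, not original code: this optimiser is intended to be
# picked up through InterTree, so comparing a dirstate basis tree against its
# WorkingTree4 may be served by InterDirStateTree.  'tree' is assumed to be a
# locked WorkingTree4 with at least one parent.
#
#   basis = tree.basis_tree()
#   delta = tree.changes_from(basis)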
1441
def _iter_changes(self, include_unchanged=False,
1442
specific_files=None, pb=None, extra_trees=[],
1443
require_versioned=True):
1444
"""Return the changes from source to target.
1446
:return: An iterator that yields tuples. See InterTree._iter_changes
1448
:param specific_files: An optional list of file paths to restrict the
1449
comparison to. When mapping filenames to ids, all matches in all
1450
trees (including optional extra_trees) are used, and all children of
1451
matched directories are included.
1452
:param include_unchanged: An optional boolean requesting the inclusion of
1453
unchanged entries in the result.
1454
:param extra_trees: An optional list of additional trees to use when
1455
mapping the contents of specific_files (paths) to file_ids.
1456
:param require_versioned: If True, all files in specific_files must be
1457
versioned in one of source, target, extra_trees or
1458
PathsNotVersionedError is raised.
1460
utf8_decode = cache_utf8._utf8_decode
1461
_minikind_to_kind = dirstate.DirState._minikind_to_kind
1462
# NB: show_status depends on being able to pass in non-versioned files
1463
# and report them as unknown
1464
# TODO: handle extra trees in the dirstate.
1466
for f in super(InterDirStateTree, self)._iter_changes(
1467
include_unchanged, specific_files, pb, extra_trees,
1471
parent_ids = self.target.get_parent_ids()
1473
if self.source._revision_id == NULL_REVISION:
1475
indices = (target_index,)
1477
assert (self.source._revision_id in parent_ids), \
1478
"Failure: source._revision_id: %s not in target.parent_ids(%s)" % (
1479
self.source._revision_id, parent_ids)
1480
source_index = 1 + parent_ids.index(self.source._revision_id)
1481
indices = (source_index,target_index)
1482
# -- make all specific_files utf8 --
1484
specific_files_utf8 = set()
1485
for path in specific_files:
1486
specific_files_utf8.add(path.encode('utf8'))
1487
specific_files = specific_files_utf8
1489
specific_files = set([''])
1490
# -- specific_files is now a utf8 path set --
1491
# -- get the state object and prepare it.
1492
state = self.target.current_dirstate()
1493
state._read_dirblocks_if_needed()
1494
def _entries_for_path(path):
1495
"""Return a list with all the entries that match path for all ids.
1497
dirname, basename = os.path.split(path)
1498
key = (dirname, basename, '')
1499
block_index, present = state._find_block_index_from_key(key)
1501
# the block which should contain path is absent.
1504
block = state._dirblocks[block_index][1]
1505
entry_index, _ = state._find_entry_index(key, block)
# we may need to look at multiple entries at this path: walk while the paths match.
1507
while (entry_index < len(block) and
1508
block[entry_index][0][0:2] == key[0:2]):
1509
result.append(block[entry_index])
1512
if require_versioned:
1513
# -- check all supplied paths are versioned in a search tree. --
1514
all_versioned = True
1515
for path in specific_files:
1516
path_entries = _entries_for_path(path)
1517
if not path_entries:
1518
# this specified path is not present at all: error
1519
all_versioned = False
1521
found_versioned = False
1522
# for each id at this path
1523
for entry in path_entries:
1525
for index in indices:
1526
if entry[1][index][0] != 'a': # absent
1527
found_versioned = True
1528
# all good: found a versioned cell
1530
if not found_versioned:
# every index was 'absent' for every id at this path.
1533
all_versioned = False
1535
if not all_versioned:
1536
raise errors.PathsNotVersionedError(specific_files)
1537
# -- remove redundancy in supplied specific_files to prevent over-scanning --
1538
search_specific_files = set()
1539
for path in specific_files:
1540
other_specific_files = specific_files.difference(set([path]))
1541
if not osutils.is_inside_any(other_specific_files, path):
1542
# this is a top level path, we must check it.
1543
search_specific_files.add(path)
# compare source_index and target_index at or under each element of search_specific_files.
# use the following comparison table. Note that we only want to do diff operations when
# the target is fdl because that's when the walkdirs logic will have exposed the pathinfo
# Source | Target | disk | action
#   r    | fdl    |      | add source to search, add id path move and perform
#        |        |      | diff check on source-target
#   r    | fdl    |  a   | dangling file that was present in the basis.
#   r    |  a     |      | add source to search
#   r    |  r     |      | this path is present in a non-examined tree, skip.
#   r    |  r     |  a   | this path is present in a non-examined tree, skip.
#   a    | fdl    |      | add new id
#   a    | fdl    |  a   | dangling locally added file, skip
#   a    |  a     |      | not present in either tree, skip
#   a    |  a     |  a   | not present in any tree, skip
#   a    |  r     |      | not present in either tree at this path, skip as it
#        |        |      | may not be selected by the user's list of paths.
#   a    |  r     |  a   | not present in either tree at this path, skip as it
#        |        |      | may not be selected by the user's list of paths.
#  fdl   | fdl    |      | content in both: diff them
#  fdl   | fdl    |  a   | deleted locally, but not unversioned - show as deleted ?
#  fdl   |  a     |      | unversioned: output deleted id for now
#  fdl   |  a     |  a   | unversioned and deleted: output deleted id
#  fdl   |  r     |      | relocated in this tree, so add target to search.
#        |        |      | Don't diff, we will see an r,fd; pair when we reach
#        |        |      | this id at the other path.
#  fdl   |  r     |  a   | relocated in this tree, so add target to search.
#        |        |      | Don't diff, we will see an r,fd; pair when we reach
#        |        |      | this id at the other path.
# for all search_indexes in each path at or under each element of
# search_specific_files, if the detail is relocated: add the id, and add the
# relocated path as one to search if it's not searched already. If the
# detail is not relocated, add the id.
1583
searched_specific_files = set()
1584
NULL_PARENT_DETAILS = dirstate.DirState.NULL_PARENT_DETAILS
1585
# Using a list so that we can access the values and change them in
1586
# nested scope. Each one is [path, file_id, entry]
1587
last_source_parent = [None, None, None]
1588
last_target_parent = [None, None, None]
1590
def _process_entry(entry, path_info):
1591
"""Compare an entry and real disk to generate delta information.
1593
:param path_info: top_relpath, basename, kind, lstat, abspath for
1594
the path of entry. If None, then the path is considered absent.
1595
(Perhaps we should pass in a concrete entry for this ?)
1596
Basename is returned as a utf8 string because we expect this
1597
tuple will be ignored, and don't want to take the time to
# TODO: when a parent has been renamed, don't emit path renames for children,
1601
if source_index is None:
1602
source_details = NULL_PARENT_DETAILS
1604
source_details = entry[1][source_index]
1605
target_details = entry[1][target_index]
1606
target_minikind = target_details[0]
1607
if path_info is not None and target_minikind in 'fdl':
1608
assert target_index == 0
1609
link_or_sha1 = state.update_entry(entry, abspath=path_info[4],
1610
stat_value=path_info[3])
1611
# The entry may have been modified by update_entry
1612
target_details = entry[1][target_index]
1613
target_minikind = target_details[0]
1616
source_minikind = source_details[0]
1617
if source_minikind in 'fdlr' and target_minikind in 'fdl':
1618
# claimed content in both: diff
1619
# r | fdl | | add source to search, add id path move and perform
1620
# | | | diff check on source-target
1621
# r | fdl | a | dangling file that was present in the basis.
1623
if source_minikind in 'r':
1624
# add the source to the search path to find any children it
1625
# has. TODO ? : only add if it is a container ?
1626
if not osutils.is_inside_any(searched_specific_files,
1628
search_specific_files.add(source_details[1])
1629
# generate the old path; this is needed for stating later
1631
old_path = source_details[1]
1632
old_dirname, old_basename = os.path.split(old_path)
1633
path = pathjoin(entry[0][0], entry[0][1])
1634
old_entry = state._get_entry(source_index,
1636
# update the source details variable to be the real
1638
source_details = old_entry[1][source_index]
1639
source_minikind = source_details[0]
1641
old_dirname = entry[0][0]
1642
old_basename = entry[0][1]
1643
old_path = path = pathjoin(old_dirname, old_basename)
1644
if path_info is None:
1645
# the file is missing on disk, show as removed.
1646
old_path = pathjoin(entry[0][0], entry[0][1])
1647
content_change = True
1651
# source and target are both versioned and disk file is present.
1652
target_kind = path_info[2]
1653
if target_kind == 'directory':
1654
if source_minikind != 'd':
1655
content_change = True
1657
# directories have no fingerprint
1658
content_change = False
1660
elif target_kind == 'file':
1661
if source_minikind != 'f':
1662
content_change = True
1664
# We could check the size, but we already have the
1666
content_change = (link_or_sha1 != source_details[1])
1667
# Target details is updated at update_entry time
1668
target_exec = target_details[3]
1669
elif target_kind == 'symlink':
1670
if source_minikind != 'l':
1671
content_change = True
1673
content_change = (link_or_sha1 != source_details[1])
1676
raise Exception, "unknown kind %s" % path_info[2]
1677
# parent id is the entry for the path in the target tree
1678
if old_dirname == last_source_parent[0]:
1679
source_parent_id = last_source_parent[1]
1681
source_parent_entry = state._get_entry(source_index,
1682
path_utf8=old_dirname)
1683
source_parent_id = source_parent_entry[0][2]
1684
if source_parent_id == entry[0][2]:
1685
# This is the root, so the parent is None
1686
source_parent_id = None
1688
last_source_parent[0] = old_dirname
1689
last_source_parent[1] = source_parent_id
1690
last_source_parent[2] = source_parent_entry
1692
new_dirname = entry[0][0]
1693
if new_dirname == last_target_parent[0]:
1694
target_parent_id = last_target_parent[1]
1696
# TODO: We don't always need to do the lookup, because the
1697
# parent entry will be the same as the source entry.
1698
target_parent_entry = state._get_entry(target_index,
1699
path_utf8=new_dirname)
1700
target_parent_id = target_parent_entry[0][2]
1701
if target_parent_id == entry[0][2]:
1702
# This is the root, so the parent is None
1703
target_parent_id = None
1705
last_target_parent[0] = new_dirname
1706
last_target_parent[1] = target_parent_id
1707
last_target_parent[2] = target_parent_entry
1709
source_exec = source_details[3]
1710
return ((entry[0][2], path, content_change,
1712
(source_parent_id, target_parent_id),
1713
(old_basename, entry[0][1]),
1714
(_minikind_to_kind[source_minikind], target_kind),
1715
(source_exec, target_exec)),)
1716
elif source_minikind in 'a' and target_minikind in 'fdl':
1717
# looks like a new file
1718
if path_info is not None:
1719
path = pathjoin(entry[0][0], entry[0][1])
1720
# parent id is the entry for the path in the target tree
1721
# TODO: these are the same for an entire directory: cache em.
1722
parent_id = state._get_entry(target_index,
1723
path_utf8=entry[0][0])[0][2]
1724
if parent_id == entry[0][2]:
1726
target_exec = target_details[3]
1727
return ((entry[0][2], path, True,
1730
(None, entry[0][1]),
1731
(None, path_info[2]),
1732
(None, target_exec)),)
# but it's not on disk: we deliberately treat this as just
1735
# never-present. (Why ?! - RBC 20070224)
1737
elif source_minikind in 'fdl' and target_minikind in 'a':
# unversioned, possibly, or possibly not deleted: we don't care.
# if it's still on disk, *and* there's no other entry at this
# path [we don't know this in this routine at the moment -
# perhaps we should change this - then it would be an unknown.
1742
old_path = pathjoin(entry[0][0], entry[0][1])
1743
# parent id is the entry for the path in the target tree
1744
parent_id = state._get_entry(source_index, path_utf8=entry[0][0])[0][2]
1745
if parent_id == entry[0][2]:
1747
return ((entry[0][2], old_path, True,
1750
(entry[0][1], None),
1751
(_minikind_to_kind[source_minikind], None),
1752
(source_details[3], None)),)
1753
elif source_minikind in 'fdl' and target_minikind in 'r':
1754
# a rename; could be a true rename, or a rename inherited from
# a renamed parent. TODO: handle this efficiently. It's not a
# common case to rename dirs though, so a correct but slow
1757
# implementation will do.
1758
if not osutils.is_inside_any(searched_specific_files, target_details[1]):
1759
search_specific_files.add(target_details[1])
1760
elif source_minikind in 'r' and target_minikind in 'r':
1761
# neither of the selected trees contain this file,
1762
# so skip over it. This is not currently directly tested, but
1763
# is indirectly via test_too_much.TestCommands.test_conflicts.
1766
print "*******", source_minikind, target_minikind
1767
import pdb;pdb.set_trace()
        while search_specific_files:
            # TODO: the pending list should be lexically sorted?
            current_root = search_specific_files.pop()
            searched_specific_files.add(current_root)
            # process the entries for this containing directory: the rest will be
            # found by their parents recursively.
            root_entries = _entries_for_path(current_root)
            root_abspath = self.target.abspath(current_root)
            try:
                root_stat = os.lstat(root_abspath)
            except OSError, e:
                if e.errno == errno.ENOENT:
                    # the path does not exist: let _process_entry know that.
                    root_dir_info = None
                else:
                    # some other random error: hand it up.
                    raise
            else:
                root_dir_info = ('', current_root,
                    osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat,
                    root_abspath)
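            # root_dir_info mirrors the per-file tuples produced by
            # osutils._walkdirs_utf8 as they are indexed below:
            # (relpath, basename, kind, lstat result, abspath).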
            if not root_entries and not root_dir_info:
                # this specified path is not present at all, skip it.
                continue
            for entry in root_entries:
                for result in _process_entry(entry, root_dir_info):
                    # this check should probably be outside the loop: one
                    # 'iterate two trees' api, and then _iter_changes filters
                    # unchanged pairs. - RBC 20070226
                    if (include_unchanged
                        or result[2]                     # content change
                        or result[3][0] != result[3][1]  # versioned status
                        or result[4][0] != result[4][1]  # parent id
                        or result[5][0] != result[5][1]  # name
                        or result[6][0] != result[6][1]  # kind
                        or result[7][0] != result[7][1]  # executable
                        ):
                        result = (result[0],
                            utf8_decode(result[1])[0]) + result[2:]
                        yield result
            dir_iterator = osutils._walkdirs_utf8(root_abspath, prefix=current_root)
            initial_key = (current_root, '', '')
            block_index, _ = state._find_block_index_from_key(initial_key)
            if block_index == 0:
                # we have processed the total root already, but because the
                # initial key matched it we should skip it here.
                block_index += 1
            try:
                current_dir_info = dir_iterator.next()
            except OSError, e:
                if e.errno in (errno.ENOENT, errno.ENOTDIR):
                    # there may be directories in the inventory even though
                    # this path is not a file on disk: so mark it as end of
                    # iterator.
                    current_dir_info = None
                else:
                    raise
            else:
                if current_dir_info[0][0] == '':
                    # remove .bzr from iteration
                    bzr_index = bisect_left(current_dir_info[1], ('.bzr',))
                    assert current_dir_info[1][bzr_index][0] == '.bzr'
                    del current_dir_info[1][bzr_index]
            # walk until both the directory listing and the versioned metadata
            # are exhausted. TODO: reevaluate this, perhaps we should stop when
            # the versioned data runs out.
            if (block_index < len(state._dirblocks) and
                osutils.is_inside(current_root, state._dirblocks[block_index][0])):
                current_block = state._dirblocks[block_index]
            else:
                current_block = None
            while (current_dir_info is not None or
                   current_block is not None):
                if (current_dir_info and current_block
                    and current_dir_info[0][0] != current_block[0]):
                    if current_dir_info[0][0] < current_block[0]:
                        # import pdb; pdb.set_trace()
                        # print 'unversioned dir'
                        # filesystem data refers to paths not covered by the dirblock.
                        # this has two possibilities:
                        # A) it is versioned but empty, so there is no block for it
                        # B) it is not versioned.
                        # in either case it was processed by the containing directories walk:
                        # if it is root/foo, when we walked root we emitted it,
                        # or if we were given root/foo to walk specifically, we
                        # emitted it when checking the walk-root entries.
                        # advance the iterator and loop - we don't need to emit it.
                        try:
                            current_dir_info = dir_iterator.next()
                        except StopIteration:
                            current_dir_info = None
                    else:
                        # We have a dirblock entry for this location, but there
                        # is no filesystem path for this. This is most likely
                        # because a directory was removed from the disk.
                        # We don't have to report the missing directory,
                        # because that should have already been handled, but we
                        # need to handle all of the files that are contained
                        # within.
                        for current_entry in current_block[1]:
                            # entry referring to file not present on disk.
                            # advance the entry only, after processing.
                            for result in _process_entry(current_entry, None):
                                # this check should probably be outside the loop: one
                                # 'iterate two trees' api, and then _iter_changes filters
                                # unchanged pairs. - RBC 20070226
                                if (include_unchanged
                                    or result[2]                     # content change
                                    or result[3][0] != result[3][1]  # versioned status
                                    or result[4][0] != result[4][1]  # parent id
                                    or result[5][0] != result[5][1]  # name
                                    or result[6][0] != result[6][1]  # kind
                                    or result[7][0] != result[7][1]  # executable
                                    ):
                                    result = (result[0],
                                        utf8_decode(result[1])[0]) + result[2:]
                                    yield result
                        block_index += 1
                        if (block_index < len(state._dirblocks) and
                            osutils.is_inside(current_root,
                                              state._dirblocks[block_index][0])):
                            current_block = state._dirblocks[block_index]
                        else:
                            current_block = None
                    continue
                entry_index = 0
                if current_block and entry_index < len(current_block[1]):
                    current_entry = current_block[1][entry_index]
                else:
                    current_entry = None
                advance_entry = True
                path_index = 0
                if current_dir_info and path_index < len(current_dir_info[1]):
                    current_path_info = current_dir_info[1][path_index]
                else:
                    current_path_info = None
                advance_path = True
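                # Pair up dirstate entries and on-disk names for this
                # directory in lockstep (a merge join on basename):
                # advance_entry/advance_path control which cursor moves
                # after each comparison.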
                while (current_entry is not None or
                       current_path_info is not None):
                    if current_entry is None:
                        # no more entries: yield current_pathinfo as an
                        # unversioned file: it's not the same as a path in any
                        # tree in the dirstate.
                        new_executable = bool(
                            stat.S_ISREG(current_path_info[3].st_mode)
                            and stat.S_IEXEC & current_path_info[3].st_mode)
                        pass # unversioned file support not added to the
                        # _iter_changes api yet - breaks status amongst other
                        # things.
                        # yield (None, current_path_info[0], True,
                        #        (False, False),
                        #        (None, None),
                        #        (None, current_path_info[1]),
                        #        (None, current_path_info[2]),
                        #        (None, new_executable))
                    elif current_path_info is None:
                        # no path is fine: the per entry code will handle it.
                        for result in _process_entry(current_entry, current_path_info):
                            # this check should probably be outside the loop: one
                            # 'iterate two trees' api, and then _iter_changes filters
                            # unchanged pairs. - RBC 20070226
                            if (include_unchanged
                                or result[2]                     # content change
                                or result[3][0] != result[3][1]  # versioned status
                                or result[4][0] != result[4][1]  # parent id
                                or result[5][0] != result[5][1]  # name
                                or result[6][0] != result[6][1]  # kind
                                or result[7][0] != result[7][1]  # executable
                                ):
                                result = (result[0],
                                    utf8_decode(result[1])[0]) + result[2:]
                                yield result
                    elif current_entry[0][1] != current_path_info[1]:
                        if current_path_info[1] < current_entry[0][1]:
                            # extra file on disk: pass for now, but only
                            # increment the path, not the entry
                            # import pdb; pdb.set_trace()
                            # print 'unversioned file'
                            advance_entry = False
                        else:
                            # entry referring to file not present on disk.
                            # advance the entry only, after processing.
                            for result in _process_entry(current_entry, None):
                                # this check should probably be outside the loop: one
                                # 'iterate two trees' api, and then _iter_changes filters
                                # unchanged pairs. - RBC 20070226
                                if (include_unchanged
                                    or result[2]                     # content change
                                    or result[3][0] != result[3][1]  # versioned status
                                    or result[4][0] != result[4][1]  # parent id
                                    or result[5][0] != result[5][1]  # name
                                    or result[6][0] != result[6][1]  # kind
                                    or result[7][0] != result[7][1]  # executable
                                    ):
                                    result = (result[0],
                                        utf8_decode(result[1])[0]) + result[2:]
                                    yield result
                            advance_path = False
                    else:
                        for result in _process_entry(current_entry, current_path_info):
                            # this check should probably be outside the loop: one
                            # 'iterate two trees' api, and then _iter_changes filters
                            # unchanged pairs. - RBC 20070226
                            if (include_unchanged
                                or result[2]                     # content change
                                or result[3][0] != result[3][1]  # versioned status
                                or result[4][0] != result[4][1]  # parent id
                                or result[5][0] != result[5][1]  # name
                                or result[6][0] != result[6][1]  # kind
                                or result[7][0] != result[7][1]  # executable
                                ):
                                result = (result[0],
                                    utf8_decode(result[1])[0]) + result[2:]
                                yield result
                    if advance_entry and current_entry is not None:
                        entry_index += 1
                        if entry_index < len(current_block[1]):
                            current_entry = current_block[1][entry_index]
                        else:
                            current_entry = None
                    else:
                        advance_entry = True # reset the advance flag
                    if advance_path and current_path_info is not None:
                        path_index += 1
                        if path_index < len(current_dir_info[1]):
                            current_path_info = current_dir_info[1][path_index]
                        else:
                            current_path_info = None
                    else:
                        advance_path = True # reset the advance flag.
                if current_block is not None:
                    block_index += 1
                    if (block_index < len(state._dirblocks) and
                        osutils.is_inside(current_root, state._dirblocks[block_index][0])):
                        current_block = state._dirblocks[block_index]
                    else:
                        current_block = None
                if current_dir_info is not None:
                    try:
                        current_dir_info = dir_iterator.next()
                    except StopIteration:
                        current_dir_info = None

    @staticmethod
    def is_compatible(source, target):
        # the target must be a dirstate working tree
        if not isinstance(target, WorkingTree4):
            return False
        # the source must be a RevisionTree or a dirstate rev tree.
        if not isinstance(source,
            (revisiontree.RevisionTree, DirStateRevisionTree)):
            return False
        # the source revid must be in the target dirstate
        if not (source._revision_id == NULL_REVISION or
                source._revision_id in target.get_parent_ids()):
            # TODO: what about ghosts? it may well need to
            # check for them explicitly.
            return False
        return True

InterTree.register_optimiser(InterDirStateTree)
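
# Usage sketch (illustrative, not part of the module API): with the optimiser
# registered above, InterTree.get() returns an InterDirStateTree whenever
# is_compatible() accepts the pair, e.g. a basis RevisionTree against a
# WorkingTree4:
#
#   inter = InterTree.get(tree.basis_tree(), tree)
#   for change in inter._iter_changes(include_unchanged=False):
#       ...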