# Copyright (C) 2005, 2006, 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""WorkingTree4 format and implementation.

WorkingTree4 provides the dirstate based working tree logic.

To get a WorkingTree, call bzrdir.open_workingtree() or
WorkingTree.open(dir).
"""
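# Illustrative usage sketch (added commentary, not in the original source):
# per the docstring above, a dirstate-backed tree is normally obtained by
# opening an existing working tree, e.g.
#
#   from bzrlib.workingtree import WorkingTree
#   tree = WorkingTree.open('.')   # a WorkingTree4 when the tree is format 4
#
# The path '.' here is only an example.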
from cStringIO import StringIO

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from bisect import bisect_left
from copy import deepcopy

    conflicts as _mod_conflicts,
from bzrlib.transport import get_transport
""")

from bzrlib import symbol_versioning
from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inventory import InventoryEntry, Inventory, ROOT_ID, entry_factory
from bzrlib.lockable_files import LockableFiles, TransportLock
from bzrlib.lockdir import LockDir
import bzrlib.mutabletree
from bzrlib.mutabletree import needs_tree_write_lock
from bzrlib.osutils import (
from bzrlib.trace import mutter, note
from bzrlib.transport.local import LocalTransport
from bzrlib.tree import InterTree
from bzrlib.progress import DummyProgress, ProgressPhase
from bzrlib.revision import NULL_REVISION, CURRENT_REVISION
from bzrlib.rio import RioReader, rio_file, Stanza
from bzrlib.symbol_versioning import (deprecated_passed,
from bzrlib.tree import Tree
from bzrlib.workingtree import WorkingTree, WorkingTree3, WorkingTreeFormat3

class WorkingTree4(WorkingTree3):
    """This is the Format 4 working tree.

    This differs from WorkingTree3 by:
     - Having a consolidated internal dirstate, stored in a
       randomly-accessible sorted file on disk.
     - Not having a regular inventory attribute. One can be synthesized
       on demand but this is expensive and should be avoided.

    This is new in bzr 0.15.
    """

    def __init__(self, basedir,
        """Construct a WorkingTree for basedir.

        If the branch is not supplied, it is opened automatically.
        If the branch is supplied, it must be the branch for this basedir.
        (branch.base is not cross checked, because for remote branches that
        would be meaningless).
        """
        self._format = _format
        self.bzrdir = _bzrdir
        from bzrlib.trace import note, mutter
        assert isinstance(basedir, basestring), \
            "base directory %r is not a string" % basedir
        basedir = safe_unicode(basedir)
        mutter("opening working tree %r", basedir)
        self._branch = branch
        assert isinstance(self.branch, bzrlib.branch.Branch), \
            "branch %r is not a Branch" % self.branch
        self.basedir = realpath(basedir)
        # if branch is at our basedir and is a format 6 or less
        # assume all other formats have their own control files.
        assert isinstance(_control_files, LockableFiles), \
            "_control_files must be a LockableFiles, not %r" % _control_files
        self._control_files = _control_files
        # during a read or write lock these objects are set, and are
        # None the rest of the time.
        self._dirstate = None
        self._inventory = None

    @needs_tree_write_lock
    def _add(self, files, ids, kinds):
        """See MutableTree._add."""
        state = self.current_dirstate()
        for f, file_id, kind in zip(files, ids, kinds):
            # special case tree root handling.
            if f == '' and self.path2id(f) == ROOT_ID:
                state.set_path_id('', generate_ids.gen_file_id(f))
            file_id = generate_ids.gen_file_id(f)
            # deliberately add the file with no cached stat or sha1
            # - on the first access it will be gathered, and we can
            # always change this once tests are all passing.
            state.add(f, file_id, kind, None, '')
        self._make_dirty(reset_inventory=True)
    def _make_dirty(self, reset_inventory):
        """Make the tree state dirty.

        :param reset_inventory: True if the cached inventory should be removed
            (presuming there is one).
        """
        if reset_inventory and self._inventory is not None:
            self._inventory = None

    @needs_tree_write_lock
    def add_reference(self, sub_tree):
        # use standard implementation, which calls back to self._add
        # So we don't store the reference_revision in the working dirstate,
        # it's just recorded at the moment of commit.
        self._add_reference(sub_tree)

    def break_lock(self):
        """Break a lock if one is present from another instance.

        Uses the ui factory to ask for confirmation if the lock may be from
        This will probe the repository for its lock as well.
        """
        # if the dirstate is locked by an active process, reject the break lock
        if self._dirstate is None:
        state = self._current_dirstate()
        if state._lock_token is not None:
            # we already have it locked. sheesh, can't break our own lock.
            raise errors.LockActive(self.basedir)
        # try for a write lock - need permission to get one anyhow
        except errors.LockContention:
            # oslocks fail when a process is still live: fail.
            # TODO: get the locked lockdir info and give to the user to
            #       assist in debugging.
            raise errors.LockActive(self.basedir)
        self._dirstate = None
        self._control_files.break_lock()
        self.branch.break_lock()

    def _comparison_data(self, entry, path):
        kind, executable, stat_value = \
            WorkingTree3._comparison_data(self, entry, path)
        # it looks like a plain directory, but it's really a reference
        if kind == 'directory' and entry.kind == 'tree-reference':
            kind = 'tree-reference'
        return kind, executable, stat_value

    def commit(self, message=None, revprops=None, *args, **kwargs):
        # mark the tree as dirty post commit - commit
        # can change the current versioned list by doing deletes.
        result = WorkingTree3.commit(self, message, revprops, *args, **kwargs)
        self._make_dirty(reset_inventory=True)

    def current_dirstate(self):
        """Return the current dirstate object.

        This is not part of the tree interface and only exposed for ease of
        :raises errors.NotWriteLocked: when not in a lock.
        """
        self._must_be_locked()
        return self._current_dirstate()

    def _current_dirstate(self):
        """Internal function that does not check lock status.

        This is needed for break_lock which also needs the dirstate.
        """
        if self._dirstate is not None:
            return self._dirstate
        local_path = self.bzrdir.get_workingtree_transport(None
            ).local_abspath('dirstate')
        self._dirstate = dirstate.DirState.on_file(local_path)
        return self._dirstate
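    # Added commentary (not in the original source): the dirstate is loaded
    # lazily from the file named 'dirstate' on this tree's working tree
    # transport, via dirstate.DirState.on_file(), and is cached on
    # self._dirstate until unlock() drops it.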
    def filter_unversioned_files(self, paths):
        """Filter out paths that are versioned.

        :return: set of paths.
        """
        # TODO: make a generic multi-bisect routine roughly that should list
        # the paths, then process one half at a time recursively, and feed the
        # results of each bisect in further still
        paths = sorted(paths)
        state = self.current_dirstate()
        # TODO we want a paths_to_dirblocks helper I think
            dirname, basename = os.path.split(path.encode('utf8'))
            _, _, _, path_is_versioned = state._get_block_entry_index(
                dirname, basename, 0)
            if not path_is_versioned:

        """Write all cached data to disk."""
        if self._control_files._lock_mode != 'w':
            raise errors.NotWriteLocked(self)
        self.current_dirstate().save()
        self._inventory = None

    def _generate_inventory(self):
        """Create and set self.inventory from the dirstate object.

        This is relatively expensive: we have to walk the entire dirstate.
        Ideally we would not, and can deprecate this function.
        """
        #: uncomment to trap on inventory requests.
        # import pdb;pdb.set_trace()
        state = self.current_dirstate()
        state._read_dirblocks_if_needed()
        root_key, current_entry = self._get_entry(path='')
        current_id = root_key[2]
        assert current_entry[0][0] == 'd' # directory
        inv = Inventory(root_id=current_id)
        # Turn some things into local variables
        minikind_to_kind = dirstate.DirState._minikind_to_kind
        factory = entry_factory
        utf8_decode = cache_utf8._utf8_decode
        # we could do this straight out of the dirstate; it might be fast
        # and should be profiled - RBC 20070216
        parent_ies = {'' : inv.root}
        for block in state._dirblocks[1:]: # skip the root
                parent_ie = parent_ies[dirname]
                # all the paths in this block are not versioned in this tree
            for key, entry in block[1]:
                minikind, link_or_sha1, size, executable, stat = entry[0]
                if minikind in ('a', 'r'): # absent, relocated
                    # a parent tree only entry
                name_unicode = utf8_decode(name)[0]
                kind = minikind_to_kind[minikind]
                inv_entry = factory[kind](file_id, name_unicode,
                    # not strictly needed: working tree
                    #entry.executable = executable
                    #entry.text_size = size
                    #entry.text_sha1 = sha1
                elif kind == 'directory':
                    # add this entry to the parent map.
                    parent_ies[(dirname + '/' + name).strip('/')] = inv_entry
                # These checks cost us around 40ms on a 55k entry tree
                assert file_id not in inv_byid, ('file_id %s already in'
                    ' inventory as %s' % (file_id, inv_byid[file_id]))
                assert name_unicode not in parent_ie.children
                inv_byid[file_id] = inv_entry
                parent_ie.children[name_unicode] = inv_entry
        self._inventory = inv

    def _get_entry(self, file_id=None, path=None):
        """Get the dirstate row for file_id or path.

        If either file_id or path is supplied, it is used as the key to lookup.
        If both are supplied, the fastest lookup is used, and an error is
        raised if they do not both point at the same row.

        :param file_id: An optional unicode file_id to be looked up.
        :param path: An optional unicode path to be looked up.
        :return: The dirstate row tuple for path/file_id, or (None, None)
        """
        if file_id is None and path is None:
            raise errors.BzrError('must supply file_id or path')
        state = self.current_dirstate()
            path = path.encode('utf8')
        return state._get_entry(0, fileid_utf8=file_id, path_utf8=path)
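    # Added commentary (not in the original source): throughout this module a
    # dirstate "row"/entry is a 2-tuple of
    #   key     = (dirname_utf8, basename_utf8, file_id)
    #   details = [one tuple per tree: (minikind, fingerprint, size,
    #              executable, packed_stat_or_revision)]
    # with details index 0 being this working tree and later indexes the
    # parent trees; see _generate_inventory() above for how the fields are
    # unpacked.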
    def get_file_sha1(self, file_id, path=None, stat_value=None):
        # check file id is valid unconditionally.
        entry = self._get_entry(file_id=file_id, path=path)
        assert entry[0] is not None, 'what error should this raise'
        # if row stat is valid, use cached sha1, else, get a new sha1.
            path = pathjoin(entry[0][0], entry[0][1]).decode('utf8')
        file_abspath = self.abspath(path)
        state = self.current_dirstate()
        link_or_sha1 = state.update_entry(entry, file_abspath,
                                          stat_value=stat_value)
        if entry[1][0][0] == 'f':

    def _get_inventory(self):
        """Get the inventory for the tree. This is only valid within a lock."""
        if self._inventory is not None:
            return self._inventory
        self._must_be_locked()
        self._generate_inventory()
        return self._inventory

    inventory = property(_get_inventory,
                         doc="Inventory of this Tree")

    def get_parent_ids(self):
        """See Tree.get_parent_ids.

        This implementation requests the ids list from the dirstate file.
        """
        return self.current_dirstate().get_parent_ids()

    def get_reference_revision(self, entry, path=None):
        # referenced tree's revision is whatever's currently there
        return self.get_nested_tree(entry, path).last_revision()

    def get_nested_tree(self, entry, path=None):
            path = self.id2path(entry.file_id)
        return WorkingTree.open(self.abspath(path))

    def get_root_id(self):
        """Return the id of this tree's root"""
        return self._get_entry(path='')[0][2]

    def has_id(self, file_id):
        state = self.current_dirstate()
        file_id = osutils.safe_file_id(file_id)
        row, parents = self._get_entry(file_id=file_id)
        return osutils.lexists(pathjoin(
                    self.basedir, row[0].decode('utf8'), row[1].decode('utf8')))

    def id2path(self, file_id):
        file_id = osutils.safe_file_id(file_id)
        state = self.current_dirstate()
        entry = self._get_entry(file_id=file_id)
        if entry == (None, None):
            raise errors.NoSuchId(tree=self, file_id=file_id)
        path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
        return path_utf8.decode('utf8')

        """Iterate through file_ids for this tree.

        file_ids are in a WorkingTree if they are in the working inventory
        and the working file exists.
        """
        for key, tree_details in self.current_dirstate()._iter_entries():
            if tree_details[0][0] in ('a', 'r'): # absent, relocated
                # not relevant to the working tree
            path = pathjoin(self.basedir, key[0].decode('utf8'), key[1].decode('utf8'))
            if osutils.lexists(path):
                result.append(key[2])

    def kind(self, file_id):
        # The kind of a file is whatever it actually is on disk, except that
        # tree-references need to be reported as such rather than as the
        # TODO: Possibly we should check that the directory still really
        # contains a subtree, at least during commit? mbp 20070227
        kind = WorkingTree3.kind(self, file_id)
        if kind == 'directory':
            # TODO: ask the dirstate not the inventory -- mbp 20060227
            entry = self.inventory[file_id]
            if entry.kind == 'tree-reference':
                kind = 'tree-reference'

    def _last_revision(self):
        """See Mutable.last_revision."""
        parent_ids = self.current_dirstate().get_parent_ids()

        """See Branch.lock_read, and WorkingTree.unlock."""
        self.branch.lock_read()
            self._control_files.lock_read()
                state = self.current_dirstate()
                if not state._lock_token:
                self._control_files.unlock()

    def _lock_self_write(self):
        """This should be called after the branch is locked."""
            self._control_files.lock_write()
                state = self.current_dirstate()
                if not state._lock_token:
                self._control_files.unlock()

    def lock_tree_write(self):
        """See MutableTree.lock_tree_write, and WorkingTree.unlock."""
        self.branch.lock_read()
        self._lock_self_write()

    def lock_write(self):
        """See MutableTree.lock_write, and WorkingTree.unlock."""
        self.branch.lock_write()
        self._lock_self_write()
    @needs_tree_write_lock
    def move(self, from_paths, to_dir, after=False):
        """See WorkingTree.move()."""
        state = self.current_dirstate()
        assert not isinstance(from_paths, basestring)
        to_dir_utf8 = to_dir.encode('utf8')
        to_entry_dirname, to_basename = os.path.split(to_dir_utf8)
        id_index = state._get_id_index()
        # check destination directory
        # get the details for it
        to_entry_block_index, to_entry_entry_index, dir_present, entry_present = \
            state._get_block_entry_index(to_entry_dirname, to_basename, 0)
        if not entry_present:
            raise errors.BzrMoveFailedError('', to_dir,
                errors.NotVersionedError(to_dir))
        to_entry = state._dirblocks[to_entry_block_index][1][to_entry_entry_index]
        # get a handle on the block itself.
        to_block_index = state._ensure_block(
            to_entry_block_index, to_entry_entry_index, to_dir_utf8)
        to_block = state._dirblocks[to_block_index]
        to_abs = self.abspath(to_dir)
        if not isdir(to_abs):
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotADirectory(to_abs))

        if to_entry[1][0][0] != 'd':
            raise errors.BzrMoveFailedError('',to_dir,
                errors.NotADirectory(to_abs))

        if self._inventory is not None:
            update_inventory = True
            to_dir_ie = inv[to_dir_id]
            to_dir_id = to_entry[0][2]

            update_inventory = False

        def move_one(old_entry, from_path_utf8, minikind, executable,
                     fingerprint, packed_stat, size,
                     to_block, to_key, to_path_utf8):
            state._make_absent(old_entry)
            from_key = old_entry[0]
                lambda:state.update_minimal(from_key,
                    executable=executable,
                    fingerprint=fingerprint,
                    packed_stat=packed_stat,
                    path_utf8=from_path_utf8))
            state.update_minimal(to_key,
                    executable=executable,
                    fingerprint=fingerprint,
                    packed_stat=packed_stat,
                    path_utf8=to_path_utf8)
            added_entry_index, _ = state._find_entry_index(to_key, to_block[1])
            new_entry = to_block[1][added_entry_index]
            rollbacks.append(lambda:state._make_absent(new_entry))

        # create rename entries and tuples
        for from_rel in from_paths:
            # from_rel is 'pathinroot/foo/bar'
            from_rel_utf8 = from_rel.encode('utf8')
            from_dirname, from_tail = osutils.split(from_rel)
            from_dirname, from_tail_utf8 = osutils.split(from_rel_utf8)
            from_entry = self._get_entry(path=from_rel)
            if from_entry == (None, None):
                raise errors.BzrMoveFailedError(from_rel,to_dir,
                    errors.NotVersionedError(path=str(from_rel)))

            from_id = from_entry[0][2]
            to_rel = pathjoin(to_dir, from_tail)
            to_rel_utf8 = pathjoin(to_dir_utf8, from_tail_utf8)
            item_to_entry = self._get_entry(path=to_rel)
            if item_to_entry != (None, None):
                raise errors.BzrMoveFailedError(from_rel, to_rel,
                    "Target is already versioned.")

            if from_rel == to_rel:
                raise errors.BzrMoveFailedError(from_rel, to_rel,
                    "Source and target are identical.")

            from_missing = not self.has_filename(from_rel)
            to_missing = not self.has_filename(to_rel)
                    raise errors.BzrMoveFailedError(from_rel, to_rel,
                        errors.NoSuchFile(path=to_rel,
                        extra="New file has not been created yet"))
                    # neither path exists
                    raise errors.BzrRenameFailedError(from_rel, to_rel,
                        errors.PathsDoNotExist(paths=(from_rel, to_rel)))
                if from_missing: # implicitly just update our path mapping
                    raise errors.RenameFailedFilesExist(from_rel, to_rel,
                        extra="(Use --after to update the Bazaar id)")
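            # Added commentary (not in the original source): each completed
            # step below appends an undo callable to 'rollbacks'; if a later
            # step fails, rollback_rename() replays them in reverse order so
            # that disk, inventory and dirstate changes are unwound together.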
            def rollback_rename():
                """A single rename has failed, roll it back."""
                for rollback in reversed(rollbacks):
                    import pdb;pdb.set_trace()
                    exc_info = sys.exc_info()
                raise exc_info[0], exc_info[1], exc_info[2]

            # perform the disk move first - it's the most likely failure point.
            from_rel_abs = self.abspath(from_rel)
            to_rel_abs = self.abspath(to_rel)
            osutils.rename(from_rel_abs, to_rel_abs)
            raise errors.BzrMoveFailedError(from_rel, to_rel, e[1])
            rollbacks.append(lambda: osutils.rename(to_rel_abs, from_rel_abs))
            # perform the rename in the inventory next if needed: it's easy
            from_entry = inv[from_id]
            current_parent = from_entry.parent_id
            inv.rename(from_id, to_dir_id, from_tail)
                lambda: inv.rename(from_id, current_parent, from_tail))
            # finally do the rename in the dirstate, which is a little
            # tricky to rollback, but least likely to need it.
            old_block_index, old_entry_index, dir_present, file_present = \
                state._get_block_entry_index(from_dirname, from_tail_utf8, 0)
            old_block = state._dirblocks[old_block_index][1]
            old_entry = old_block[old_entry_index]
            from_key, old_entry_details = old_entry
            cur_details = old_entry_details[0]
            to_key = ((to_block[0],) + from_key[1:3])
            minikind = cur_details[0]
            move_one(old_entry, from_path_utf8=from_rel_utf8,
                     executable=cur_details[3],
                     fingerprint=cur_details[1],
                     packed_stat=cur_details[4],
                     to_path_utf8=to_rel_utf8)

            def update_dirblock(from_dir, to_key, to_dir_utf8):
                """all entries in this block need updating.

                TODO: This is pretty ugly, and doesn't support
                reverting, but it works.
                """
                assert from_dir != '', "renaming root not supported"
                from_key = (from_dir, '')
                from_block_idx, present = \
                    state._find_block_index_from_key(from_key)
                # This is the old record, if it isn't present, then
                # there is theoretically nothing to update.
                # (Unless it isn't present because of lazy loading,
                # but we don't do that yet)
                from_block = state._dirblocks[from_block_idx]
                to_block_index, to_entry_index, _, _ = \
                    state._get_block_entry_index(to_key[0], to_key[1], 0)
                to_block_index = state._ensure_block(
                    to_block_index, to_entry_index, to_dir_utf8)
                to_block = state._dirblocks[to_block_index]
                for entry in from_block[1]:
                    assert entry[0][0] == from_dir
                    cur_details = entry[1][0]
                    to_key = (to_dir_utf8, entry[0][1], entry[0][2])
                    from_path_utf8 = osutils.pathjoin(entry[0][0], entry[0][1])
                    to_path_utf8 = osutils.pathjoin(to_dir_utf8, entry[0][1])
                    minikind = cur_details[0]
                    move_one(entry, from_path_utf8=from_path_utf8,
                             executable=cur_details[3],
                             fingerprint=cur_details[1],
                             packed_stat=cur_details[4],
                             to_path_utf8=to_rel_utf8)
                    # We need to move all the children of this
                    update_dirblock(from_path_utf8, to_key,
            update_dirblock(from_rel_utf8, to_key, to_rel_utf8)
            result.append((from_rel, to_rel))
            state._dirblock_state = dirstate.DirState.IN_MEMORY_MODIFIED
            self._make_dirty(reset_inventory=False)
    def _must_be_locked(self):
        if not self._control_files._lock_count:
            raise errors.ObjectNotLocked(self)

        """Initialize the state in this tree to be a new tree."""

    def path2id(self, path):
        """Return the id for path in this tree."""
        path = path.strip('/')
        entry = self._get_entry(path=path)
        if entry == (None, None):

    def paths2ids(self, paths, trees=[], require_versioned=True):
        """See Tree.paths2ids().

        This specialisation fast-paths the case where all the trees are in the
        """
        parents = self.get_parent_ids()
            if not (isinstance(tree, DirStateRevisionTree) and tree._revision_id in
                return super(WorkingTree4, self).paths2ids(paths, trees, require_versioned)
        search_indexes = [0] + [1 + parents.index(tree._revision_id) for tree in trees]
        # -- make all paths utf8 --
            paths_utf8.add(path.encode('utf8'))
        # -- paths is now a utf8 path set --
        # -- get the state object and prepare it.
        state = self.current_dirstate()
        if False and (state._dirblock_state == dirstate.DirState.NOT_IN_MEMORY
            and '' not in paths):
            paths2ids = self._paths2ids_using_bisect
            paths2ids = self._paths2ids_in_memory
        return paths2ids(paths, search_indexes,
                         require_versioned=require_versioned)

    def _paths2ids_in_memory(self, paths, search_indexes,
                             require_versioned=True):
        state = self.current_dirstate()
        state._read_dirblocks_if_needed()
        def _entries_for_path(path):
            """Return a list with all the entries that match path for all ids.
            """
            dirname, basename = os.path.split(path)
            key = (dirname, basename, '')
            block_index, present = state._find_block_index_from_key(key)
                # the block which should contain path is absent.
            block = state._dirblocks[block_index][1]
            entry_index, _ = state._find_entry_index(key, block)
            # we may need to look at multiple entries at this path: walk while the paths match.
            while (entry_index < len(block) and
                block[entry_index][0][0:2] == key[0:2]):
                result.append(block[entry_index])
        if require_versioned:
            # -- check all supplied paths are versioned in a search tree. --
                path_entries = _entries_for_path(path)
                    # this specified path is not present at all: error
                    all_versioned = False
                found_versioned = False
                # for each id at this path
                for entry in path_entries:
                    for index in search_indexes:
                        if entry[1][index][0] != 'a': # absent
                            found_versioned = True
                            # all good: found a versioned cell
                if not found_versioned:
                    # no search index had a non-'absent' entry for any id at this path
                    all_versioned = False
        if not all_versioned:
            raise errors.PathsNotVersionedError(paths)
        # -- remove redundancy in supplied paths to prevent over-scanning --
            other_paths = paths.difference(set([path]))
            if not osutils.is_inside_any(other_paths, path):
                # this is a top level path, we must check it.
                search_paths.add(path)
        # for all search_indexes in each path at or under each element of
        # search_paths, if the detail is relocated: add the id, and add the
        # relocated path as one to search if it's not searched already. If the
        # detail is not relocated, add the id.
        searched_paths = set()
        def _process_entry(entry):
            """Look at search_indexes within entry.

            If a specific tree's details are relocated, add the relocation
            target to search_paths if not searched already. If it is absent, do
            nothing. Otherwise add the id to found_ids.
            """
            for index in search_indexes:
                if entry[1][index][0] == 'r': # relocated
                    if not osutils.is_inside_any(searched_paths, entry[1][index][1]):
                        search_paths.add(entry[1][index][1])
                elif entry[1][index][0] != 'a': # absent
                    found_ids.add(entry[0][2])
            current_root = search_paths.pop()
            searched_paths.add(current_root)
            # process the entries for this containing directory: the rest will be
            # found by their parents recursively.
            root_entries = _entries_for_path(current_root)
                # this specified path is not present at all, skip it.
            for entry in root_entries:
                _process_entry(entry)
            initial_key = (current_root, '', '')
            block_index, _ = state._find_block_index_from_key(initial_key)
            while (block_index < len(state._dirblocks) and
                osutils.is_inside(current_root, state._dirblocks[block_index][0])):
                for entry in state._dirblocks[block_index][1]:
                    _process_entry(entry)
    def _paths2ids_using_bisect(self, paths, search_indexes,
                                require_versioned=True):
        state = self.current_dirstate()

        split_paths = sorted(osutils.split(p) for p in paths)
        found = state._bisect_recursive(split_paths)

        if require_versioned:
            found_dir_names = set(dir_name_id[:2] for dir_name_id in found)
            for dir_name in split_paths:
                if dir_name not in found_dir_names:
                    raise errors.PathsNotVersionedError(paths)

        for dir_name_id, trees_info in found.iteritems():
            for index in search_indexes:
                if trees_info[index][0] not in ('r', 'a'):
                    found_ids.add(dir_name_id[2])

    def read_working_inventory(self):
        """Read the working inventory.

        This is a meaningless operation for dirstate, but we obey it anyhow.
        """
        return self.inventory

    def revision_tree(self, revision_id):
        """See Tree.revision_tree.

        WorkingTree4 supplies revision_trees for any basis tree.
        """
        revision_id = osutils.safe_revision_id(revision_id)
        dirstate = self.current_dirstate()
        parent_ids = dirstate.get_parent_ids()
        if revision_id not in parent_ids:
            raise errors.NoSuchRevisionInTree(self, revision_id)
        if revision_id in dirstate.get_ghosts():
            raise errors.NoSuchRevisionInTree(self, revision_id)
        return DirStateRevisionTree(dirstate, revision_id,
            self.branch.repository)

    @needs_tree_write_lock
    def set_last_revision(self, new_revision):
        """Change the last revision in the working tree."""
        new_revision = osutils.safe_revision_id(new_revision)
        parents = self.get_parent_ids()
        if new_revision in (NULL_REVISION, None):
            assert len(parents) < 2, (
                "setting the last parent to none with a pending merge is "
            self.set_parent_ids([])
            self.set_parent_ids([new_revision] + parents[1:],
                allow_leftmost_as_ghost=True)

    @needs_tree_write_lock
    def set_parent_ids(self, revision_ids, allow_leftmost_as_ghost=False):
        """Set the parent ids to revision_ids.

        See also set_parent_trees. This api will try to retrieve the tree data
        for each element of revision_ids from the trees repository. If you have
        tree data already available, it is more efficient to use
        set_parent_trees rather than set_parent_ids. set_parent_ids is however
        an easier API to use.

        :param revision_ids: The revision_ids to set as the parent ids of this
            working tree. Any of these may be ghosts.
        """
        revision_ids = [osutils.safe_revision_id(r) for r in revision_ids]
        for revision_id in revision_ids:
                revtree = self.branch.repository.revision_tree(revision_id)
            # TODO: jam 20070213 KnitVersionedFile raises
            #       RevisionNotPresent rather than NoSuchRevision if a
            #       given revision_id is not present. Should Repository be
            #       catching it and re-raising NoSuchRevision?
            except (errors.NoSuchRevision, errors.RevisionNotPresent):
            trees.append((revision_id, revtree))
        self.current_dirstate()._validate()
        self.set_parent_trees(trees,
            allow_leftmost_as_ghost=allow_leftmost_as_ghost)
        self.current_dirstate()._validate()
    @needs_tree_write_lock
    def set_parent_trees(self, parents_list, allow_leftmost_as_ghost=False):
        """Set the parents of the working tree.

        :param parents_list: A list of (revision_id, tree) tuples.
            If tree is None, then that element is treated as an unreachable
            parent tree - i.e. a ghost.
        """
        dirstate = self.current_dirstate()
        if len(parents_list) > 0:
            if not allow_leftmost_as_ghost and parents_list[0][1] is None:
                raise errors.GhostRevisionUnusableHere(parents_list[0][0])

        # convert absent trees to the null tree, which we convert back to
        for rev_id, tree in parents_list:
            rev_id = osutils.safe_revision_id(rev_id)
                real_trees.append((rev_id, tree))
                real_trees.append((rev_id,
                    self.branch.repository.revision_tree(None)))
                ghosts.append(rev_id)
        dirstate.set_parent_trees(real_trees, ghosts=ghosts)
        self._make_dirty(reset_inventory=False)

    def _set_root_id(self, file_id):
        """See WorkingTree.set_root_id."""
        state = self.current_dirstate()
        state.set_path_id('', file_id)
        if state._dirblock_state == dirstate.DirState.IN_MEMORY_MODIFIED:
            self._make_dirty(reset_inventory=True)

        """Unlock in format 4 trees needs to write the entire dirstate."""
        if self._control_files._lock_count == 1:
            # eventually we should do signature checking during read locks for
            if self._control_files._lock_mode == 'w':
            if self._dirstate is not None:
                # This is a no-op if there are no modifications.
                self._dirstate.save()
                self._dirstate.unlock()
            # TODO: jam 20070301 We shouldn't have to wipe the dirstate at this
            #       point. Instead, it could check if the header has been
            #       modified when it is locked, and if not, it can hang on to
            #       the data it has in memory.
            self._dirstate = None
            self._inventory = None
        # reverse order of locking.
            return self._control_files.unlock()
            self.branch.unlock()

    @needs_tree_write_lock
    def unversion(self, file_ids):
        """Remove the file ids in file_ids from the current versioned set.

        When a file_id is unversioned, all of its children are automatically

        :param file_ids: The file ids to stop versioning.
        :raises: NoSuchId if any fileid is not currently versioned.
        """
        state = self.current_dirstate()
        state._read_dirblocks_if_needed()
        ids_to_unversion = set()
        for file_id in file_ids:
            ids_to_unversion.add(osutils.safe_file_id(file_id))
        paths_to_unversion = set()
        # check if the root is to be unversioned, if so, assert for now.
        # walk the state marking unversioned things as absent.
        # if there are any un-unversioned ids at the end, raise
        for key, details in state._dirblocks[0][1]:
            if (details[0][0] not in ('a', 'r') and # absent or relocated
                key[2] in ids_to_unversion):
                # I haven't written the code to unversion / yet - it should be
                raise errors.BzrError('Unversioning the / is not currently supported')
        while block_index < len(state._dirblocks):
            # process one directory at a time.
            block = state._dirblocks[block_index]
            # first check: is the path one to remove - it or its children
            delete_block = False
            for path in paths_to_unversion:
                if (block[0].startswith(path) and
                    (len(block[0]) == len(path) or
                    block[0][len(path)] == '/')):
                    # this entire block should be deleted - it's the block for a
                    # path to unversion; or the child of one
            # TODO: trim paths_to_unversion as we pass by paths
                # this block is to be deleted: process it.
                # TODO: we can special case the no-parents case and
                # just forget the whole block.
                while entry_index < len(block[1]):
                    # Mark this file id as having been removed
                    ids_to_unversion.discard(block[1][entry_index][0][2])
                    if not state._make_absent(block[1][entry_index]):
                # go to the next block. (At the moment we don't delete empty
            while entry_index < len(block[1]):
                entry = block[1][entry_index]
                if (entry[1][0][0] in ('a', 'r') or # absent, relocated
                    # ^ some parent row.
                    entry[0][2] not in ids_to_unversion):
                    # ^ not an id to unversion
                if entry[1][0][0] == 'd':
                    paths_to_unversion.add(pathjoin(entry[0][0], entry[0][1]))
                if not state._make_absent(entry):
                # we have unversioned this id
                ids_to_unversion.remove(entry[0][2])
        if ids_to_unversion:
            raise errors.NoSuchId(self, iter(ids_to_unversion).next())
        self._make_dirty(reset_inventory=False)
        # have to change the legacy inventory too.
        if self._inventory is not None:
            for file_id in file_ids:
                self._inventory.remove_recursive_id(file_id)

    @needs_tree_write_lock
    def _write_inventory(self, inv):
        """Write inventory as the current inventory."""
        assert not self._dirty, "attempting to write an inventory when the dirstate is dirty will cause data loss"
        self.current_dirstate().set_state_from_inventory(inv)
        self._make_dirty(reset_inventory=False)
        if self._inventory is not None:
            self._inventory = inv

class WorkingTreeFormat4(WorkingTreeFormat3):
    """The first consolidated dirstate working tree format.

        - exists within a metadir controlling .bzr
        - includes an explicit version marker for the workingtree control
          files, separate from the BzrDir format
        - modifies the hash cache format
        - is new in bzr TODO FIXME SETBEFOREMERGE
        - uses a LockDir to guard access to it.
    """

    supports_tree_reference = True

    def get_format_string(self):
        """See WorkingTreeFormat.get_format_string()."""
        return "Bazaar Working Tree format 4\n"
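    # Added commentary (not in the original source): the string returned by
    # get_format_string() is what initialize() below writes to the 'format'
    # file in the tree's control directory, and is how an existing tree is
    # later recognised as format 4.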
    def get_format_description(self):
        """See WorkingTreeFormat.get_format_description()."""
        return "Working tree format 4"

    def initialize(self, a_bzrdir, revision_id=None):
        """See WorkingTreeFormat.initialize().

        :param revision_id: allows creating a working tree at a different
            revision than the branch is at.

        These trees get an initial random root id.
        """
        revision_id = osutils.safe_revision_id(revision_id)
        if not isinstance(a_bzrdir.transport, LocalTransport):
            raise errors.NotLocalUrl(a_bzrdir.transport.base)
        transport = a_bzrdir.get_workingtree_transport(self)
        control_files = self._open_control_files(a_bzrdir)
        control_files.create_lock()
        control_files.lock_write()
        control_files.put_utf8('format', self.get_format_string())
        branch = a_bzrdir.open_branch()
        if revision_id is None:
            revision_id = branch.last_revision()
        local_path = transport.local_abspath('dirstate')
        # write out new dirstate (must exist when we create the tree)
        state = dirstate.DirState.initialize(local_path)
        wt = WorkingTree4(a_bzrdir.root_transport.local_abspath('.'),
                          _control_files=control_files)
        wt.lock_tree_write()
            if revision_id in (None, NULL_REVISION):
                wt._set_root_id(generate_ids.gen_root_id())
                wt.current_dirstate()._validate()
            wt.set_last_revision(revision_id)
            basis = wt.basis_tree()
            # if the basis has a root id we have to use that; otherwise we use
            basis_root_id = basis.get_root_id()
            if basis_root_id is not None:
                wt._set_root_id(basis_root_id)
            transform.build_tree(basis, wt)
            control_files.unlock()

    def _open(self, a_bzrdir, control_files):
        """Open the tree itself.

        :param a_bzrdir: the dir for the tree.
        :param control_files: the control files for the tree.
        """
        return WorkingTree4(a_bzrdir.root_transport.local_abspath('.'),
                            branch=a_bzrdir.open_branch(),
                            _control_files=control_files)

    def __get_matchingbzrdir(self):
        # please test against something that will let us do tree references
        return bzrdir.format_registry.make_bzrdir(
            'experimental-reference-dirstate')

    _matchingbzrdir = property(__get_matchingbzrdir)

class DirStateRevisionTree(Tree):
    """A revision tree pulling the inventory from a dirstate."""

    def __init__(self, dirstate, revision_id, repository):
        self._dirstate = dirstate
        self._revision_id = osutils.safe_revision_id(revision_id)
        self._repository = repository
        self._inventory = None
        self._dirstate_locked = False

        return "<%s of %s in %s>" % \
            (self.__class__.__name__, self._revision_id, self._dirstate)

    def annotate_iter(self, file_id):
        """See Tree.annotate_iter"""
        w = self._repository.weave_store.get_weave(file_id,
            self._repository.get_transaction())
        return w.annotate_iter(self.inventory[file_id].revision)

    def _comparison_data(self, entry, path):
        """See Tree._comparison_data."""
            return None, False, None
        # trust the entry as RevisionTree does, but this may not be
        # sensible: the entry might not have come from us?
        return entry.kind, entry.executable, None

    def _file_size(self, entry, stat_value):
        return entry.text_size

    def filter_unversioned_files(self, paths):
        """Filter out paths that are not versioned.

        :return: set of paths.
        """
        pred = self.has_filename
        return set((p for p in paths if not pred(p)))

    def get_root_id(self):
        return self.path2id('')

    def _get_parent_index(self):
        """Return the index in the dirstate referenced by this tree."""
        return self._dirstate.get_parent_ids().index(self._revision_id) + 1
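    # Added commentary (not in the original source): details index 0 in the
    # dirstate is the working tree itself, so a parent tree's details live at
    # get_parent_ids().index(revision_id) + 1; that is the value returned by
    # _get_parent_index() above and used by the lookups below.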
    def _get_entry(self, file_id=None, path=None):
        """Get the dirstate row for file_id or path.

        If either file_id or path is supplied, it is used as the key to lookup.
        If both are supplied, the fastest lookup is used, and an error is
        raised if they do not both point at the same row.

        :param file_id: An optional unicode file_id to be looked up.
        :param path: An optional unicode path to be looked up.
        :return: The dirstate row tuple for path/file_id, or (None, None)
        """
        if file_id is None and path is None:
            raise errors.BzrError('must supply file_id or path')
        file_id = osutils.safe_file_id(file_id)
        if path is not None:
            path = path.encode('utf8')
        parent_index = self._get_parent_index()
        return self._dirstate._get_entry(parent_index, fileid_utf8=file_id, path_utf8=path)

    def _generate_inventory(self):
        """Create and set self.inventory from the dirstate object.

        (So this is only called the first time the inventory is requested for
        this tree; it then remains in memory until it's out of date.)

        This is relatively expensive: we have to walk the entire dirstate.
        """
        assert self._locked, 'cannot generate inventory of an unlocked '\
            'dirstate revision tree'
        # separate call for profiling - makes it clear where the costs are.
        self._dirstate._read_dirblocks_if_needed()
        assert self._revision_id in self._dirstate.get_parent_ids(), \
            'parent %s has disappeared from %s' % (
            self._revision_id, self._dirstate.get_parent_ids())
        parent_index = self._dirstate.get_parent_ids().index(self._revision_id) + 1
        # This is identical now to the WorkingTree _generate_inventory except
        # for the tree index use.
        root_key, current_entry = self._dirstate._get_entry(parent_index, path_utf8='')
        current_id = root_key[2]
        assert current_entry[parent_index][0] == 'd'
        inv = Inventory(root_id=current_id, revision_id=self._revision_id)
        inv.root.revision = current_entry[parent_index][4]
        # Turn some things into local variables
        minikind_to_kind = dirstate.DirState._minikind_to_kind
        factory = entry_factory
        utf8_decode = cache_utf8._utf8_decode
        inv_byid = inv._byid
        # we could do this straight out of the dirstate; it might be fast
        # and should be profiled - RBC 20070216
        parent_ies = {'' : inv.root}
        for block in self._dirstate._dirblocks[1:]: #skip root
                parent_ie = parent_ies[dirname]
                # all the paths in this block are not versioned in this tree
            for key, entry in block[1]:
                minikind, fingerprint, size, executable, revid = entry[parent_index]
                if minikind in ('a', 'r'): # absent, relocated
                name_unicode = utf8_decode(name)[0]
                kind = minikind_to_kind[minikind]
                inv_entry = factory[kind](file_id, name_unicode,
                inv_entry.revision = revid
                    inv_entry.executable = executable
                    inv_entry.text_size = size
                    inv_entry.text_sha1 = fingerprint
                elif kind == 'directory':
                    parent_ies[(dirname + '/' + name).strip('/')] = inv_entry
                elif kind == 'symlink':
                    inv_entry.executable = False
                    inv_entry.text_size = size
                    inv_entry.symlink_target = utf8_decode(fingerprint)[0]
                elif kind == 'tree-reference':
                    inv_entry.reference_revision = fingerprint
                    raise AssertionError("cannot convert entry %r into an InventoryEntry"
                # These checks cost us around 40ms on a 55k entry tree
                assert file_id not in inv_byid
                assert name_unicode not in parent_ie.children
                inv_byid[file_id] = inv_entry
                parent_ie.children[name_unicode] = inv_entry
        self._inventory = inv
    def get_file_mtime(self, file_id, path=None):
        """Return the modification time for this record.

        We return the timestamp of the last-changed revision.
        """
        # Make sure the file exists
        entry = self._get_entry(file_id, path=path)
        if entry == (None, None): # do we raise?
        parent_index = self._get_parent_index()
        last_changed_revision = entry[1][parent_index][4]
        return self._repository.get_revision(last_changed_revision).timestamp

    def get_file_sha1(self, file_id, path=None, stat_value=None):
        entry = self._get_entry(file_id=file_id, path=path)
        parent_index = self._get_parent_index()
        parent_details = entry[1][parent_index]
        if parent_details[0] == 'f':
            return parent_details[1]

    def get_file(self, file_id):
        return StringIO(self.get_file_text(file_id))

    def get_file_lines(self, file_id):
        ie = self.inventory[file_id]
        return self._repository.weave_store.get_weave(file_id,
            self._repository.get_transaction()).get_lines(ie.revision)

    def get_file_size(self, file_id):
        return self.inventory[file_id].text_size

    def get_file_text(self, file_id):
        return ''.join(self.get_file_lines(file_id))

    def get_symlink_target(self, file_id):
        entry = self._get_entry(file_id=file_id)
        parent_index = self._get_parent_index()
        if entry[1][parent_index][0] != 'l':
        # At present, none of the tree implementations supports non-ascii
        # symlink targets. So we will just assume that the dirstate path is
        return entry[1][parent_index][1]

    def get_revision_id(self):
        """Return the revision id for this tree."""
        return self._revision_id

    def _get_inventory(self):
        if self._inventory is not None:
            return self._inventory
        self._must_be_locked()
        self._generate_inventory()
        return self._inventory

    inventory = property(_get_inventory,
                         doc="Inventory of this Tree")

    def get_parent_ids(self):
        """The parents of a tree in the dirstate are not cached."""
        return self._repository.get_revision(self._revision_id).parent_ids

    def has_filename(self, filename):
        return bool(self.path2id(filename))

    def kind(self, file_id):
        return self.inventory[file_id].kind

    def is_executable(self, file_id, path=None):
        ie = self.inventory[file_id]
        if ie.kind != "file":
        return ie.executable

    def list_files(self, include_root=False):
        # We use a standard implementation, because DirStateRevisionTree is
        # dealing with one of the parents of the current state
        inv = self._get_inventory()
        entries = inv.iter_entries()
        if self.inventory.root is not None and not include_root:
        for path, entry in entries:
            yield path, 'V', entry.kind, entry.file_id, entry

    def lock_read(self):
        """Lock the tree for a set of operations."""
        if not self._locked:
            self._repository.lock_read()
            if self._dirstate._lock_token is None:
                self._dirstate.lock_read()
                self._dirstate_locked = True

    def _must_be_locked(self):
        if not self._locked:
            raise errors.ObjectNotLocked(self)

    def path2id(self, path):
        """Return the id for path in this tree."""
        # lookup by path: faster than splitting and walking the inventory.
        entry = self._get_entry(path=path)
        if entry == (None, None):

        """Unlock, freeing any cache memory used during the lock."""
        # outside of a lock, the inventory is suspect: release it.
        if not self._locked:
            self._inventory = None
            if self._dirstate_locked:
                self._dirstate.unlock()
                self._dirstate_locked = False
            self._repository.unlock()

    def walkdirs(self, prefix=""):
        # TODO: jam 20070215 This is the cheap way by cheating and using the
        #       RevisionTree implementation.
        #       This should be cleaned up to use the much faster Dirstate code
        #       This is a little tricky, though, because the dirstate is
        #       indexed by current path, not by parent path.
        #       So for now, we just build up the parent inventory, and extract
        #       it the same way RevisionTree does.
        _directory = 'directory'
        inv = self._get_inventory()
        top_id = inv.path2id(prefix)
        pending = [(prefix, top_id)]
            relpath, file_id = pending.pop()
            # 0 - relpath, 1- file-id
                relroot = relpath + '/'
            # FIXME: stash the node in pending
            entry = inv[file_id]
            for name, child in entry.sorted_children():
                toppath = relroot + name
                dirblock.append((toppath, name, child.kind, None,
                    child.file_id, child.kind
            yield (relpath, entry.file_id), dirblock
            # push the user specified dirs from dirblock
            for dir in reversed(dirblock):
                if dir[2] == _directory:
                    pending.append((dir[0], dir[4]))
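# Added commentary (not in the original source): InterDirStateTree below is
# the fast path used when comparing a DirStateRevisionTree basis against the
# WorkingTree4 that holds it; _iter_changes asserts that the source revision
# is one of the target tree's parents, so both sides can be read from the
# same dirstate.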

class InterDirStateTree(InterTree):
    """Fast path optimiser for changes_from with dirstate trees."""

    def __init__(self, source, target):
        super(InterDirStateTree, self).__init__(source, target)
        if not InterDirStateTree.is_compatible(source, target):
            raise Exception, "invalid source %r and target %r" % (source, target)

    def make_source_parent_tree(source, target):
        """Change the source tree into a parent of the target."""
        revid = source.commit('record tree')
        target.branch.repository.fetch(source.branch.repository, revid)
        target.set_parent_ids([revid])
        return target.basis_tree(), target

    _matching_from_tree_format = WorkingTreeFormat4()
    _matching_to_tree_format = WorkingTreeFormat4()
    _test_mutable_trees_to_test_trees = make_source_parent_tree

    def _iter_changes(self, include_unchanged=False,
                      specific_files=None, pb=None, extra_trees=[],
                      require_versioned=True, want_unversioned=False):
        """Return the changes from source to target.

        :return: An iterator that yields tuples. See InterTree._iter_changes
        :param specific_files: An optional list of file paths to restrict the
            comparison to. When mapping filenames to ids, all matches in all
            trees (including optional extra_trees) are used, and all children of
            matched directories are included.
        :param include_unchanged: An optional boolean requesting the inclusion of
            unchanged entries in the result.
        :param extra_trees: An optional list of additional trees to use when
            mapping the contents of specific_files (paths) to file_ids.
        :param require_versioned: If True, all files in specific_files must be
            versioned in one of source, target, extra_trees or
            PathsNotVersionedError is raised.
        :param want_unversioned: Should unversioned files be returned in the
            output. An unversioned file is defined as one with (False, False)
            for the versioned pair.
        """
        utf8_decode = cache_utf8._utf8_decode_with_None
        _minikind_to_kind = dirstate.DirState._minikind_to_kind
        # NB: show_status depends on being able to pass in non-versioned files
        # and report them as unknown
        # TODO: handle extra trees in the dirstate.
        # TODO: handle comparisons as an empty tree as a different special
        # case? mbp 20070226
        if extra_trees or (self.source._revision_id == NULL_REVISION):
            # we can't fast-path these cases (yet)
            for f in super(InterDirStateTree, self)._iter_changes(
                include_unchanged, specific_files, pb, extra_trees,
        parent_ids = self.target.get_parent_ids()
        assert (self.source._revision_id in parent_ids), \
            "revision {%s} is not stored in {%s}, but %s " \
            "can only be used for trees stored in the dirstate" \
            % (self.source._revision_id, self.target, self._iter_changes)
        if self.source._revision_id == NULL_REVISION:
            indices = (target_index,)
            assert (self.source._revision_id in parent_ids), \
                "Failure: source._revision_id: %s not in target.parent_ids(%s)" % (
                self.source._revision_id, parent_ids)
            source_index = 1 + parent_ids.index(self.source._revision_id)
            indices = (source_index,target_index)
        # -- make all specific_files utf8 --
            specific_files_utf8 = set()
            for path in specific_files:
                specific_files_utf8.add(path.encode('utf8'))
            specific_files = specific_files_utf8
            specific_files = set([''])
        # -- specific_files is now a utf8 path set --
        # -- get the state object and prepare it.
        state = self.target.current_dirstate()
        state._read_dirblocks_if_needed()
        def _entries_for_path(path):
            """Return a list with all the entries that match path for all ids.
            """
            dirname, basename = os.path.split(path)
            key = (dirname, basename, '')
            block_index, present = state._find_block_index_from_key(key)
                # the block which should contain path is absent.
            block = state._dirblocks[block_index][1]
            entry_index, _ = state._find_entry_index(key, block)
            # we may need to look at multiple entries at this path: walk while the specific_files match.
            while (entry_index < len(block) and
                block[entry_index][0][0:2] == key[0:2]):
                result.append(block[entry_index])
        if require_versioned:
            # -- check all supplied paths are versioned in a search tree. --
            all_versioned = True
            for path in specific_files:
                path_entries = _entries_for_path(path)
                if not path_entries:
                    # this specified path is not present at all: error
                    all_versioned = False
                found_versioned = False
                # for each id at this path
                for entry in path_entries:
                    for index in indices:
                        if entry[1][index][0] != 'a': # absent
                            found_versioned = True
                            # all good: found a versioned cell
                if not found_versioned:
                    # no search index had a non-'absent' entry for any id at this path
                    all_versioned = False
        if not all_versioned:
            raise errors.PathsNotVersionedError(specific_files)
        # -- remove redundancy in supplied specific_files to prevent over-scanning --
        search_specific_files = set()
        for path in specific_files:
            other_specific_files = specific_files.difference(set([path]))
            if not osutils.is_inside_any(other_specific_files, path):
                # this is a top level path, we must check it.
                search_specific_files.add(path)
        # compare source_index and target_index at or under each element of search_specific_files.
        # follow the following comparison table. Note that we only want to do diff operations when
        # the target is fdl because that's when the walkdirs logic will have exposed the pathinfo
        # Source | Target | disk  | action
        #  r     | fdlt   |       | add source to search, add id path move and perform
        #        |        |       | diff check on source-target
        #  r     | fdlt   | a     | dangling file that was present in the basis.
        #  r     | a      |       | add source to search
        #  r     | r      |       | this path is present in a non-examined tree, skip.
        #  r     | r      | a     | this path is present in a non-examined tree, skip.
        #  a     | fdlt   |       | add new id
        #  a     | fdlt   | a     | dangling locally added file, skip
        #  a     | a      |       | not present in either tree, skip
        #  a     | a      | a     | not present in any tree, skip
        #  a     | r      |       | not present in either tree at this path, skip as it
        #        |        |       | may not be selected by the users list of paths.
        #  a     | r      | a     | not present in either tree at this path, skip as it
        #        |        |       | may not be selected by the users list of paths.
        #  fdlt  | fdlt   |       | content in both: diff them
        #  fdlt  | fdlt   | a     | deleted locally, but not unversioned - show as deleted ?
        #  fdlt  | a      |       | unversioned: output deleted id for now
        #  fdlt  | a      | a     | unversioned and deleted: output deleted id
        #  fdlt  | r      |       | relocated in this tree, so add target to search.
        #        |        |       | Don't diff, we will see an r,fd; pair when we reach
        #        |        |       | this id at the other path.
        #  fdlt  | r      | a     | relocated in this tree, so add target to search.
        #        |        |       | Don't diff, we will see an r,fd; pair when we reach
        #        |        |       | this id at the other path.
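        # Added commentary (not in the original source): in the table above
        # the single letters are dirstate minikinds: 'a' = absent,
        # 'r' = relocated, and 'f'/'d'/'l'/'t' = file/directory/symlink/
        # tree-reference (hence "fdlt"); the 'disk' column is what lstat
        # finds at the path, with 'a' meaning nothing is there.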
# for all search_indexs in each path at or under each element of
1676
# search_specific_files, if the detail is relocated: add the id, and add the
1677
# relocated path as one to search if its not searched already. If the
1678
# detail is not relocated, add the id.
1679
searched_specific_files = set()
1680
NULL_PARENT_DETAILS = dirstate.DirState.NULL_PARENT_DETAILS
1681
# Using a list so that we can access the values and change them in
1682
# nested scope. Each one is [path, file_id, entry]
1683
last_source_parent = [None, None, None]
1684
last_target_parent = [None, None, None]
1686
def _process_entry(entry, path_info):
1687
"""Compare an entry and real disk to generate delta information.
1689
:param path_info: top_relpath, basename, kind, lstat, abspath for
1690
the path of entry. If None, then the path is considered absent.
1691
(Perhaps we should pass in a concrete entry for this ?)
1692
Basename is returned as a utf8 string because we expect this
1693
tuple will be ignored, and don't want to take the time to
1696
# TODO: when a parent has been renamed, dont emit path renames for children,
1697
if source_index is None:
1698
source_details = NULL_PARENT_DETAILS
1700
source_details = entry[1][source_index]
1701
target_details = entry[1][target_index]
1702
target_minikind = target_details[0]
1703
if path_info is not None and target_minikind in 'fdl':
1704
assert target_index == 0
1705
link_or_sha1 = state.update_entry(entry, abspath=path_info[4],
1706
stat_value=path_info[3])
1707
# The entry may have been modified by update_entry
1708
target_details = entry[1][target_index]
1709
target_minikind = target_details[0]
1712
source_minikind = source_details[0]
1713
if source_minikind in 'fdltr' and target_minikind in 'fdlt':
1714
# claimed content in both: diff
1715
# r | fdlt | | add source to search, add id path move and perform
1716
# | | | diff check on source-target
1717
# r | fdlt | a | dangling file that was present in the basis.
1719
if source_minikind in 'r':
1720
# add the source to the search path to find any children it
1721
# has. TODO ? : only add if it is a container ?
1722
if not osutils.is_inside_any(searched_specific_files,
1724
search_specific_files.add(source_details[1])
1725
# generate the old path; this is needed for stating later
1727
old_path = source_details[1]
1728
old_dirname, old_basename = os.path.split(old_path)
1729
path = pathjoin(entry[0][0], entry[0][1])
1730
old_entry = state._get_entry(source_index,
1732
# update the source details variable to be the real
1734
source_details = old_entry[1][source_index]
1735
source_minikind = source_details[0]
1737
old_dirname = entry[0][0]
1738
old_basename = entry[0][1]
1739
old_path = path = pathjoin(old_dirname, old_basename)
                if path_info is None:
                    # the file is missing on disk, show as removed.
                    content_change = True
                    target_kind = None
                    target_exec = False
                else:
                    # source and target are both versioned and disk file is present.
                    target_kind = path_info[2]
                    if target_kind == 'directory':
                        if source_minikind != 'd':
                            content_change = True
                        else:
                            # directories have no fingerprint
                            content_change = False
                        target_exec = False
                    elif target_kind == 'file':
                        if source_minikind != 'f':
                            content_change = True
                        else:
                            # We could check the size, but we already have the
                            # sha1 hash.
                            content_change = (link_or_sha1 != source_details[1])
                        # Target details is updated at update_entry time
                        target_exec = bool(
                            stat.S_ISREG(path_info[3].st_mode)
                            and stat.S_IEXEC & path_info[3].st_mode)
                    elif target_kind == 'symlink':
                        if source_minikind != 'l':
                            content_change = True
                        else:
                            content_change = (link_or_sha1 != source_details[1])
                        target_exec = False
                    elif target_kind == 'tree-reference':
                        if source_minikind != 't':
                            content_change = True
                        else:
                            content_change = False
                        target_exec = False
                    else:
                        raise Exception("unknown kind %s" % path_info[2])
                # parent id is the entry for the path in the target tree
                if old_dirname == last_source_parent[0]:
                    source_parent_id = last_source_parent[1]
                else:
                    source_parent_entry = state._get_entry(source_index,
                                                           path_utf8=old_dirname)
                    source_parent_id = source_parent_entry[0][2]
                    if source_parent_id == entry[0][2]:
                        # This is the root, so the parent is None
                        source_parent_id = None
                    else:
                        last_source_parent[0] = old_dirname
                        last_source_parent[1] = source_parent_id
                        last_source_parent[2] = source_parent_entry

                new_dirname = entry[0][0]
                if new_dirname == last_target_parent[0]:
                    target_parent_id = last_target_parent[1]
                else:
                    # TODO: We don't always need to do the lookup, because the
                    # parent entry will be the same as the source entry.
                    target_parent_entry = state._get_entry(target_index,
                                                           path_utf8=new_dirname)
                    target_parent_id = target_parent_entry[0][2]
                    if target_parent_id == entry[0][2]:
                        # This is the root, so the parent is None
                        target_parent_id = None
                    else:
                        last_target_parent[0] = new_dirname
                        last_target_parent[1] = target_parent_id
                        last_target_parent[2] = target_parent_entry

                source_exec = source_details[3]
                return ((entry[0][2], (old_path, path), content_change,
                        (True, True),
                        (source_parent_id, target_parent_id),
                        (old_basename, entry[0][1]),
                        (_minikind_to_kind[source_minikind], target_kind),
                        (source_exec, target_exec)),)
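            # The tuple returned above (and by the branches below) follows the
            # iter_changes result layout checked by the filtering code later:
            # (file_id, (source_path, target_path), changed_content,
            #  (source_versioned, target_versioned),
            #  (source_parent_id, target_parent_id),
            #  (source_name, target_name), (source_kind, target_kind),
            #  (source_exec, target_exec))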
            elif source_minikind in 'a' and target_minikind in 'fdlt':
                # looks like a new file
                if path_info is not None:
                    path = pathjoin(entry[0][0], entry[0][1])
                    # parent id is the entry for the path in the target tree
                    # TODO: these are the same for an entire directory: cache them.
                    parent_id = state._get_entry(target_index,
                                                 path_utf8=entry[0][0])[0][2]
                    if parent_id == entry[0][2]:
                        parent_id = None
                    target_exec = bool(
                        stat.S_ISREG(path_info[3].st_mode)
                        and stat.S_IEXEC & path_info[3].st_mode)
                    return ((entry[0][2], (None, path), True,
                            (False, True),
                            (None, parent_id),
                            (None, entry[0][1]),
                            (None, path_info[2]),
                            (None, target_exec)),)
                else:
                    # but it's not on disk: we deliberately treat this as just
                    # never-present. (Why ?! - RBC 20070224)
                    pass
            elif source_minikind in 'fdlt' and target_minikind in 'a':
                # unversioned, possibly, or possibly not deleted: we don't care.
                # if it's still on disk, *and* there's no other entry at this
                # path [we don't know this in this routine at the moment -
                # perhaps we should change this] - then it would be an unknown.
                old_path = pathjoin(entry[0][0], entry[0][1])
                # parent id is the entry for the path in the target tree
                parent_id = state._get_entry(source_index, path_utf8=entry[0][0])[0][2]
                if parent_id == entry[0][2]:
                    parent_id = None
                return ((entry[0][2], (old_path, None), True,
                        (True, False),
                        (parent_id, None),
                        (entry[0][1], None),
                        (_minikind_to_kind[source_minikind], None),
                        (source_details[3], None)),)
            elif source_minikind in 'fdlt' and target_minikind in 'r':
                # a rename; could be a true rename, or a rename inherited from
                # a renamed parent. TODO: handle this efficiently. It's not a
                # common case to rename dirs though, so a correct but slow
                # implementation will do.
                if not osutils.is_inside_any(searched_specific_files, target_details[1]):
                    search_specific_files.add(target_details[1])
            elif source_minikind in 'r' and target_minikind in 'r':
                # neither of the selected trees contain this file,
                # so skip over it. This is not currently directly tested, but
                # is indirectly via test_too_much.TestCommands.test_conflicts.
                pass
            else:
                # leftover debugging aid for combinations not handled above.
                print "*******", source_minikind, target_minikind
                import pdb;pdb.set_trace()
            return ()
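
        # Walk each requested root in turn, pairing the dirstate blocks for a
        # directory with the actual directory listing on disk and handing each
        # (entry, path) pair to _process_entry, yielding the filtered results.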
        while search_specific_files:
            # TODO: the pending list should be lexically sorted?
            current_root = search_specific_files.pop()
            searched_specific_files.add(current_root)
            # process the entries for this containing directory: the rest will be
            # found by their parents recursively.
            root_entries = _entries_for_path(current_root)
            root_abspath = self.target.abspath(current_root)
            try:
                root_stat = os.lstat(root_abspath)
            except OSError, e:
                if e.errno == errno.ENOENT:
                    # the path does not exist: let _process_entry know that.
                    root_dir_info = None
                else:
                    # some other random error: hand it up.
                    raise
            else:
                root_dir_info = ('', current_root,
                    osutils.file_kind_from_stat_mode(root_stat.st_mode), root_stat,
                    root_abspath)
            if not root_entries and not root_dir_info:
                # this specified path is not present at all, skip it.
                continue
            path_handled = False
            for entry in root_entries:
                for result in _process_entry(entry, root_dir_info):
                    # this check should probably be outside the loop: one
                    # 'iterate two trees' api, and then _iter_changes filters
                    # unchanged pairs. - RBC 20070226
                    path_handled = True
                    if (include_unchanged
                        or result[2]                    # content change
                        or result[3][0] != result[3][1] # versioned status
                        or result[4][0] != result[4][1] # parent id
                        or result[5][0] != result[5][1] # name
                        or result[6][0] != result[6][1] # kind
                        or result[7][0] != result[7][1] # executable
                        ):
                        result = (result[0],
                            ((utf8_decode(result[1][0])[0]),
                             utf8_decode(result[1][1])[0]),) + result[2:]
                        yield result
            if want_unversioned and not path_handled:
                new_executable = bool(
                    stat.S_ISREG(root_dir_info[3].st_mode)
                    and stat.S_IEXEC & root_dir_info[3].st_mode)
                yield (None, (None, current_root), True, (False, False),
                    (None, None),
                    (None, splitpath(current_root)[-1]),
                    (None, root_dir_info[2]), (None, new_executable))
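            # _walkdirs_utf8 yields one tuple per directory; as used below,
            # item[0][0] is the directory's relative path and item[1] is the
            # list of entries in it, each apparently of the form
            # (relpath, basename, kind, lstat, abspath), with paths in utf8.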
            dir_iterator = osutils._walkdirs_utf8(root_abspath, prefix=current_root)
            initial_key = (current_root, '', '')
            block_index, _ = state._find_block_index_from_key(initial_key)
            if block_index == 0:
                # we have processed the total root already, but because the
                # initial key matched it we should skip it here.
                block_index += 1
            try:
                current_dir_info = dir_iterator.next()
            except OSError, e:
                if e.errno in (errno.ENOENT, errno.ENOTDIR):
                    # there may be directories in the inventory even though
                    # this path is not a file on disk: so mark it as end of
                    # iterator.
                    current_dir_info = None
                else:
                    raise
            else:
                if current_dir_info[0][0] == '':
                    # remove .bzr from iteration
                    bzr_index = bisect_left(current_dir_info[1], ('.bzr',))
                    assert current_dir_info[1][bzr_index][0] == '.bzr'
                    del current_dir_info[1][bzr_index]
            # walk until both the directory listing and the versioned metadata
            # are exhausted. TODO: reevaluate this, perhaps we should stop when
            # the versioned data runs out.
            if (block_index < len(state._dirblocks) and
                osutils.is_inside(current_root, state._dirblocks[block_index][0])):
                current_block = state._dirblocks[block_index]
            else:
                current_block = None
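            # Merge-join the versioned entries (current_block) with the disk
            # entries (current_dir_info) directory by directory, advancing
            # whichever cursor is behind until both are exhausted.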
            while (current_dir_info is not None or
                   current_block is not None):
                if (current_dir_info and current_block
                    and current_dir_info[0][0] != current_block[0]):
                    if current_dir_info[0][0] < current_block[0]:
                        # import pdb; pdb.set_trace()
                        # print 'unversioned dir'
                        # filesystem data refers to paths not covered by the dirblock.
                        # this has two possibilities:
                        # A) it is versioned but empty, so there is no block for it
                        # B) it is not versioned.
                        # in either case it was processed by the containing directories walk:
                        # if it is root/foo, when we walked root we emitted it,
                        # or if we were given root/foo to walk specifically, we
                        # emitted it when checking the walk-root entries.
                        # advance the iterator and loop - we don't need to emit it.
                        try:
                            current_dir_info = dir_iterator.next()
                        except StopIteration:
                            current_dir_info = None
                    else:
                        # We have a dirblock entry for this location, but there
                        # is no filesystem path for this. This is most likely
                        # because a directory was removed from the disk.
                        # We don't have to report the missing directory,
                        # because that should have already been handled, but we
                        # need to handle all of the files that are contained
                        # within.
                        for current_entry in current_block[1]:
                            # entry referring to file not present on disk.
                            # advance the entry only, after processing.
                            for result in _process_entry(current_entry, None):
                                # this check should probably be outside the loop: one
                                # 'iterate two trees' api, and then _iter_changes filters
                                # unchanged pairs. - RBC 20070226
                                if (include_unchanged
                                    or result[2]                    # content change
                                    or result[3][0] != result[3][1] # versioned status
                                    or result[4][0] != result[4][1] # parent id
                                    or result[5][0] != result[5][1] # name
                                    or result[6][0] != result[6][1] # kind
                                    or result[7][0] != result[7][1] # executable
                                    ):
                                    result = (result[0],
                                        ((utf8_decode(result[1][0])[0]),
                                         utf8_decode(result[1][1])[0]),) + result[2:]
                                    yield result
                        block_index += 1
                        if (block_index < len(state._dirblocks) and
                            osutils.is_inside(current_root,
                                              state._dirblocks[block_index][0])):
                            current_block = state._dirblocks[block_index]
                        else:
                            current_block = None
                    continue
                entry_index = 0
                if current_block and entry_index < len(current_block[1]):
                    current_entry = current_block[1][entry_index]
                else:
                    current_entry = None
                advance_entry = True
                path_index = 0
                if current_dir_info and path_index < len(current_dir_info[1]):
                    current_path_info = current_dir_info[1][path_index]
                else:
                    current_path_info = None
                advance_path = True
                path_handled = False
                while (current_entry is not None or
                       current_path_info is not None):
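                    # Four cases below: only a disk path remains, only a
                    # dirstate entry remains, the names differ (an extra or a
                    # missing file), or the entry and the path have the same
                    # name and are compared directly.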
                    if current_entry is None:
                        # the check for path_handled when the path is advanced
                        # will yield this path if needed.
                        pass
                    elif current_path_info is None:
                        # no path is fine: the per entry code will handle it.
                        for result in _process_entry(current_entry, current_path_info):
                            # this check should probably be outside the loop: one
                            # 'iterate two trees' api, and then _iter_changes filters
                            # unchanged pairs. - RBC 20070226
                            if (include_unchanged
                                or result[2]                    # content change
                                or result[3][0] != result[3][1] # versioned status
                                or result[4][0] != result[4][1] # parent id
                                or result[5][0] != result[5][1] # name
                                or result[6][0] != result[6][1] # kind
                                or result[7][0] != result[7][1] # executable
                                ):
                                result = (result[0],
                                    ((utf8_decode(result[1][0])[0]),
                                     utf8_decode(result[1][1])[0]),) + result[2:]
                                yield result
                    elif current_entry[0][1] != current_path_info[1]:
                        if current_path_info[1] < current_entry[0][1]:
                            # extra file on disk: pass for now, but only
                            # increment the path, not the entry
                            # import pdb; pdb.set_trace()
                            # print 'unversioned file'
                            advance_entry = False
                        else:
                            # entry referring to file not present on disk.
                            # advance the entry only, after processing.
                            for result in _process_entry(current_entry, None):
                                # this check should probably be outside the loop: one
                                # 'iterate two trees' api, and then _iter_changes filters
                                # unchanged pairs. - RBC 20070226
                                if (include_unchanged
                                    or result[2]                    # content change
                                    or result[3][0] != result[3][1] # versioned status
                                    or result[4][0] != result[4][1] # parent id
                                    or result[5][0] != result[5][1] # name
                                    or result[6][0] != result[6][1] # kind
                                    or result[7][0] != result[7][1] # executable
                                    ):
                                    result = (result[0],
                                        ((utf8_decode(result[1][0])[0]),
                                         utf8_decode(result[1][1])[0]),) + result[2:]
                                    yield result
                            advance_path = False
                    else:
                        # names match: compare the entry against the disk path.
                        for result in _process_entry(current_entry, current_path_info):
                            # this check should probably be outside the loop: one
                            # 'iterate two trees' api, and then _iter_changes filters
                            # unchanged pairs. - RBC 20070226
                            path_handled = True
                            if (include_unchanged
                                or result[2]                    # content change
                                or result[3][0] != result[3][1] # versioned status
                                or result[4][0] != result[4][1] # parent id
                                or result[5][0] != result[5][1] # name
                                or result[6][0] != result[6][1] # kind
                                or result[7][0] != result[7][1] # executable
                                ):
                                result = (result[0],
                                    ((utf8_decode(result[1][0])[0]),
                                     utf8_decode(result[1][1])[0]),) + result[2:]
                                yield result
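                    # Advance whichever cursors were consumed this iteration;
                    # the branches above clear advance_entry / advance_path
                    # when one side has to wait for the other to catch up.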
                    if advance_entry and current_entry is not None:
                        entry_index += 1
                        if entry_index < len(current_block[1]):
                            current_entry = current_block[1][entry_index]
                        else:
                            current_entry = None
                    else:
                        advance_entry = True # reset the advance flag
                    if advance_path and current_path_info is not None:
                        if not path_handled:
                            # unversioned in all regards
                            if want_unversioned:
                                new_executable = bool(
                                    stat.S_ISREG(current_path_info[3].st_mode)
                                    and stat.S_IEXEC & current_path_info[3].st_mode)
                                yield (None, (None, current_path_info[0]),
                                    True,
                                    (False, False),
                                    (None, None),
                                    (None, current_path_info[1]),
                                    (None, current_path_info[2]),
                                    (None, new_executable))
                            # don't descend into this unversioned path if it
                            # is a directory.
                            if current_path_info[2] == 'directory':
                                del current_dir_info[1][path_index]
                                path_index -= 1
                        path_index += 1
                        if path_index < len(current_dir_info[1]):
                            current_path_info = current_dir_info[1][path_index]
                        else:
                            current_path_info = None
                        path_handled = False
                    else:
                        advance_path = True # reset the advance flag.
                if current_block is not None:
                    block_index += 1
                    if (block_index < len(state._dirblocks) and
                        osutils.is_inside(current_root, state._dirblocks[block_index][0])):
                        current_block = state._dirblocks[block_index]
                    else:
                        current_block = None
                if current_dir_info is not None:
                    try:
                        current_dir_info = dir_iterator.next()
                    except StopIteration:
                        current_dir_info = None
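
    # is_compatible is the predicate the InterTree optimiser machinery uses to
    # decide whether this optimised comparison applies to a given pair of
    # trees: the target must be a dirstate-backed WorkingTree4 and the source
    # a revision tree whose revision is already recorded in the target's
    # parents.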
    @staticmethod
    def is_compatible(source, target):
        # the target must be a dirstate working tree
        if not isinstance(target, WorkingTree4):
            return False
        # the source must be a revtree or dirstate rev tree.
        if not isinstance(source,
                (revisiontree.RevisionTree, DirStateRevisionTree)):
            return False
        # the source revid must be in the target dirstate
        if not (source._revision_id == NULL_REVISION or
                source._revision_id in target.get_parent_ids()):
            # TODO: what about ghosts? it may well need to
            # check for them explicitly.
            return False
        return True


InterTree.register_optimiser(InterDirStateTree)
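
# Registering InterDirStateTree as an InterTree optimiser means that tree
# comparisons take the dirstate-optimised _iter_changes path above whenever
# is_compatible accepts the (source, target) pair.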