# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""WorkingTree object and friends.
19
19
A WorkingTree represents the editable working copy of a branch.
20
Operations which represent the WorkingTree are also done here,
21
such as renaming or adding files. The WorkingTree has an inventory
22
which is updated by these operations. A commit produces a
20
Operations which represent the WorkingTree are also done here,
21
such as renaming or adding files. The WorkingTree has an inventory
22
which is updated by these operations. A commit produces a
23
23
new revision based on the workingtree and its inventory.
25
25
At the moment every WorkingTree has its own branch. Remote
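
# Illustrative sketch only (not part of bzrlib): a minimal example of the
# workflow described above: open a tree, version a file, rename it and
# commit.  The path 'demo-branch' and the file names are hypothetical.
def _example_workingtree_usage():
    from bzrlib.workingtree import WorkingTree
    wt = WorkingTree.open('demo-branch')
    wt.lock_write()
    try:
        wt.add(['hello.txt'])                    # start versioning a file
        wt.rename_one('hello.txt', 'greeting.txt')
        wt.commit(message='rename hello.txt')    # new revision from the tree
    finally:
        wt.unlock()
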
    conflicts as _mod_conflicts,
    revision as _mod_revision,
import bzrlib.branch
from bzrlib.transport import get_transport
from bzrlib.workingtree_4 import (
    WorkingTreeFormat4,
    WorkingTreeFormat5,
    WorkingTreeFormat6,
    )
from bzrlib import symbol_versioning
from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.lock import LogicalLockResult
from bzrlib.lockable_files import LockableFiles
from bzrlib.lockdir import LockDir
import bzrlib.mutabletree
from bzrlib.mutabletree import needs_tree_write_lock
from bzrlib import osutils
from bzrlib.osutils import (
    supports_executable,
    )
from bzrlib.filters import filtered_input_file
from bzrlib.trace import mutter, note
from bzrlib.transport.local import LocalTransport
from bzrlib.revision import CURRENT_REVISION
from bzrlib.rio import RioReader, rio_file, Stanza
from bzrlib.symbol_versioning import (
    deprecated_passed,
    DEPRECATED_PARAMETER,
    )


MERGE_MODIFIED_HEADER_1 = "BZR merge-modified list format 1"
# TODO: Modifying the conflict objects or their type is currently nearly
# impossible as there is no clear relationship between the working tree format
# and the conflict list file format.
CONFLICT_HEADER_1 = "BZR conflict list format 1"

ERROR_PATH_NOT_FOUND = 3    # WindowsError errno code, equivalent to ENOENT

        self._control_files.break_lock()
        self.branch.break_lock()

    def _get_check_refs(self):
        """Return the references needed to perform a check of this tree.

        The default implementation returns no refs, and is only suitable for
        trees that have no local caching and can commit on ghosts at any time.

        :seealso: bzrlib.check for details about check_refs.
        """
        return []

    def requires_rich_root(self):
        return self._format.requires_rich_root

    def supports_tree_reference(self):
        return False

    def supports_content_filtering(self):
        return self._format.supports_content_filtering()

    def supports_views(self):
        return self.views.supports_views()

    def _set_inventory(self, inv, dirty):
        """Set the internal cached inventory.

    def has_filename(self, filename):
        return osutils.lexists(self.abspath(filename))

    def get_file(self, file_id, path=None, filtered=True):
        return self.get_file_with_stat(file_id, path, filtered=filtered)[0]

    def get_file_with_stat(self, file_id, path=None, filtered=True,
                           _fstat=os.fstat):
        """See Tree.get_file_with_stat."""
        if path is None:
            path = self.id2path(file_id)
        file_obj = self.get_file_byname(path, filtered=False)
        stat_value = _fstat(file_obj.fileno())
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(path)
            file_obj = filtered_input_file(file_obj, filters)
        return (file_obj, stat_value)

    def get_file_text(self, file_id, path=None, filtered=True):
        my_file = self.get_file(file_id, path=path, filtered=filtered)
        try:
            return my_file.read()
        finally:
            my_file.close()

    def get_file_byname(self, filename, filtered=True):
        path = self.abspath(filename)
        f = file(path, 'rb')
        if filtered and self.supports_content_filtering():
            filters = self._content_filter_stack(filename)
            return filtered_input_file(f, filters)
        else:
            return f

    def get_file_lines(self, file_id, path=None, filtered=True):
        """See Tree.get_file_lines()"""
        file = self.get_file(file_id, path, filtered=filtered)
        try:
            return file.readlines()
        finally:
            file.close()
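
    # Illustrative sketch only (not part of bzrlib): reading the filtered
    # (canonical) text of a versioned file, as the get_file_* methods above
    # allow.  The tree path 'README' and this helper name are hypothetical.
    def _example_read_filtered_text(self):
        self.lock_read()
        try:
            file_id = self.path2id('README')
            # filtered=True runs the content filter stack for this path, so
            # the bytes returned match the canonical form used by commits.
            return self.get_file_text(file_id, filtered=True)
        finally:
            self.unlock()
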
        incorrectly attributed to CURRENT_REVISION (but after committing, the
        attribution will be correct).
        """
        maybe_file_parent_keys = []
        for parent_id in self.get_parent_ids():
            try:
                parent_tree = self.revision_tree(parent_id)
            except errors.NoSuchRevisionInTree:
                parent_tree = self.branch.repository.revision_tree(parent_id)
            parent_tree.lock_read()
            try:
                if file_id not in parent_tree:
                    continue
                ie = parent_tree.inventory[file_id]
                if ie.kind != 'file':
                    # Note: this is slightly unnecessary, because symlinks and
                    # directories have a "text" which is the empty text, and we
                    # know that won't mess up annotations. But it seems cleaner
                    continue
                parent_text_key = (file_id, ie.revision)
                if parent_text_key not in maybe_file_parent_keys:
                    maybe_file_parent_keys.append(parent_text_key)
            finally:
                parent_tree.unlock()
        graph = _mod_graph.Graph(self.branch.repository.texts)
        heads = graph.heads(maybe_file_parent_keys)
        file_parent_keys = []
        for key in maybe_file_parent_keys:
            if key in heads:
                file_parent_keys.append(key)

        # Now we have the parents of this content
        annotator = self.branch.repository.texts.get_annotator()
        text = self.get_file_text(file_id)
        this_key = (file_id, default_revision)
        annotator.add_special_text(this_key, file_parent_keys, text)
        annotations = [(key[-1], line)
                       for key, line in annotator.annotate_flat(this_key)]
        return annotations
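
    # Illustrative sketch only (not part of bzrlib): annotating the working
    # copy of a file.  As described above, lines not yet committed are
    # attributed to CURRENT_REVISION.  'hello.txt' is a hypothetical path.
    def _example_annotate_working_copy(self):
        self.lock_read()
        try:
            file_id = self.path2id('hello.txt')
            results = []
            for revision_id, line in self.annotate_iter(file_id):
                results.append((revision_id, line))
            return results
        finally:
            self.unlock()
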
    def _get_ancestors(self, default_revision):
        ancestors = set([default_revision])

                kind = 'tree-reference'
            return kind, None, None, None
        elif kind == 'symlink':
            target = osutils.readlink(abspath)
            return ('symlink', None, None, target)
        else:
            return (kind, None, None, None)

    def _file_content_summary(self, path, stat_result):
        size = stat_result.st_size
        executable = self._is_executable_from_path_and_stat(path, stat_result)
        # try for a stat cache lookup
        return ('file', size, executable, self._sha_from_stat(
            path, stat_result))

    def _check_parents_for_ghosts(self, revision_ids, allow_leftmost_as_ghost):
        """Common ghost checking functionality from set_parent_*.

            branch.last_revision().
        """
        from bzrlib.merge import Merger, Merge3Merger
        merger = Merger(self.branch, this_tree=self)
        # check that there are no local alterations
        if not force and self.has_changes():
            raise errors.UncommittedChanges(self)
        if to_revision is None:
            to_revision = _mod_revision.ensure_null(branch.last_revision())
        merger.other_rev_id = to_revision
        if _mod_revision.is_null(merger.other_rev_id):
            raise errors.NoCommits(branch)
        self.branch.fetch(branch, last_revision=merger.other_rev_id)
        merger.other_basis = merger.other_rev_id
        merger.other_tree = self.branch.repository.revision_tree(
            merger.other_rev_id)
        merger.other_branch = branch
        if from_revision is None:
            merger.find_base()
        else:
            merger.set_base_revision(from_revision, branch)
        if merger.base_rev_id == merger.other_rev_id:
            raise errors.PointlessMerge
        merger.backup_files = False
        if merge_type is None:
            merger.merge_type = Merge3Merger
        else:
            merger.merge_type = merge_type
        merger.set_interesting_files(None)
        merger.show_base = False
        merger.reprocess = False
        conflicts = merger.do_merge()
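
    # Illustrative sketch only (not part of bzrlib): merging the tip of a
    # sibling branch into this tree, the same path 'bzr merge' takes through
    # merge_from_branch() above.  The branch location is hypothetical.
    def _example_merge_sibling_branch(self):
        from bzrlib.branch import Branch
        other = Branch.open('../feature-branch')
        # force=False refuses to merge over uncommitted local changes
        conflicts = self.merge_from_branch(other, force=False)
        if conflicts:
            note('merge left conflicts in the tree')
        return conflicts
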
    def merge_modified(self):
        """Return a dictionary of files modified by a merge.

        The list is initialized by WorkingTree.set_merge_modified, which is
        typically called after we make some automatic updates to the tree
        because of a merge.

        tree_transport = self.bzrdir.root_transport.clone(sub_path)
        if tree_transport.base != branch_transport.base:
            tree_bzrdir = format.initialize_on_transport(tree_transport)
            branch.BranchReferenceFormat().initialize(tree_bzrdir,
                target_branch=new_branch)
        else:
            tree_bzrdir = branch_bzrdir
        wt = tree_bzrdir.create_workingtree(_mod_revision.NULL_REVISION)
        wt.set_parent_ids(self.get_parent_ids())
        my_inv = self.inventory
        child_inv = inventory.Inventory(root_id=None)
        new_root = my_inv[file_id]
        my_inv.remove_recursive_id(file_id)
        new_root.parent_id = None

        self._serialize(self._inventory, sio)
        sio.seek(0)
        self._transport.put_file('inventory', sio,
            mode=self.bzrdir._get_file_mode())
        self._inventory_is_modified = False

    def _kind(self, relpath):
        return osutils.file_kind(self.abspath(relpath))

    def list_files(self, include_root=False, from_dir=None, recursive=True):
        """List all files as (path, class, kind, id, entry).

        Lists, but does not descend into unversioned directories.
        This does not include files that have been deleted in this
        tree. Skips the control directory.

        :param include_root: if True, return an entry for the root
        :param from_dir: start from this directory or None for the root
        :param recursive: whether to recurse into subdirectories or not
        """
        # list_files is an iterator, so @needs_read_lock doesn't work properly
        # with it. So callers should be careful to always read_lock the tree.
        fk_entries = {'directory':TreeDirectory, 'file':TreeFile, 'symlink':TreeLink}
        # directory file_id, relative path, absolute path, reverse sorted children
        if from_dir is not None:
            from_dir_id = inv.path2id(from_dir)
            if from_dir_id is None:
                # Directory not versioned
                return
            from_dir_abspath = pathjoin(self.basedir, from_dir)
        else:
            from_dir_id = inv.root.file_id
            from_dir_abspath = self.basedir
        children = os.listdir(from_dir_abspath)
        children.sort()
        # jam 20060527 The kernel sized tree seems equivalent whether we
        # use a deque and popleft to keep them sorted, or if we use a plain
        # list and just reverse() them.
        children = collections.deque(children)
        stack = [(from_dir_id, u'', from_dir_abspath, children)]
        while stack:
            from_dir_id, from_dir_relpath, from_dir_abspath, children = stack[-1]
                except KeyError:
                    yield fp[1:], c, fk, None, TreeEntry()
                    continue

                if fk != 'directory':
                    continue

                # But do this child first if recursing down
                if recursive:
                    new_children = os.listdir(fap)
                    new_children.sort()
                    new_children = collections.deque(new_children)
                    stack.append((f_ie.file_id, fp, fap, new_children))
                    # Break out of inner loop,
                    # so that we start outer loop with child
                    break
            else:
                # if we finished all children, pop it off the stack
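
    # Illustrative sketch only (not part of bzrlib): walking the tree with
    # list_files().  Because list_files() is a generator, the caller must hold
    # the read lock for the whole iteration, as the comment above warns.
    def _example_list_unknown_files(self):
        self.lock_read()
        try:
            unknowns = []
            for path, file_class, kind, file_id, entry in self.list_files():
                if file_class == '?':     # '?' marks unversioned files
                    unknowns.append(path)
            return unknowns
        finally:
            self.unlock()
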
        from_tail = splitpath(from_rel)[-1]
        from_id = inv.path2id(from_rel)
        if from_id is None:
            # if file is missing in the inventory maybe it's in the basis_tree
            basis_tree = self.branch.basis_tree()
            from_id = basis_tree.path2id(from_rel)
            if from_id is None:
                raise errors.BzrRenameFailedError(from_rel, to_rel,
                    errors.NotVersionedError(path=str(from_rel)))
            # put entry back in the inventory so we can rename it
            from_entry = basis_tree.inventory[from_id].copy()
            inv.add(from_entry)
        else:
            from_entry = inv[from_id]
        from_parent_id = from_entry.parent_id
        to_dir, to_tail = os.path.split(to_rel)
        to_dir_id = inv.path2id(to_dir)

        :raises: NoSuchId if any fileid is not currently versioned.
        """
        for file_id in file_ids:
            if file_id not in self._inventory:
                raise errors.NoSuchId(self, file_id)
        for file_id in file_ids:
            if self._inventory.has_id(file_id):
                self._inventory.remove_recursive_id(file_id)
        if len(file_ids):
            # in the future this should just set a dirty bit to wait for the
            # final unlock. However, until all methods of workingtree start
            # with the current in-memory inventory rather than triggering
            # a read, it is more complex - we need to teach read_inventory
            # to know when to read, and when to not read first... and possibly
            # to save first when the in memory one may be corrupted.
            # so for now, we just only write it if it is indeed dirty.
            # - RBC 20060907
            self._write_inventory(self._inventory)

    def _iter_conflicts(self):
        conflicted = set()
        for info in self.list_files():

    @needs_write_lock
    def pull(self, source, overwrite=False, stop_revision=None,
             change_reporter=None, possible_transports=None, local=False):
        source.lock_read()
        try:
            old_revision_info = self.branch.last_revision_info()
            basis_tree = self.basis_tree()
            count = self.branch.pull(source, overwrite, stop_revision,
                                     possible_transports=possible_transports,
                                     local=local)
            new_revision_info = self.branch.last_revision_info()
            if new_revision_info != old_revision_info:
                repository = self.branch.repository
                basis_tree.lock_read()
                try:
                    new_basis_tree = self.branch.basis_tree()
                    merge.merge_inner(
                                self.branch,
                                new_basis_tree,
                                basis_tree,
                                this_tree=self,
                                change_reporter=change_reporter)
                    basis_root_id = basis_tree.get_root_id()
                    new_root_id = new_basis_tree.get_root_id()
                    if basis_root_id != new_root_id:
                        self.set_root_id(new_root_id)
                finally:
                    basis_tree.unlock()
                # TODO - dedup parents list with things merged by pull ?
                # reuse the revisiontree we merged against to set the new
                # tree data.
                parent_trees = [(self.branch.last_revision(), new_basis_tree)]
                # we have to pull the merge trees out again, because
                # merge_inner has set the ids. - this corner is not yet
                # layered well enough to prevent double handling.
                # XXX TODO: Fix the double handling: telling the tree about
                # the already known parent data is wasteful.
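
    # Illustrative sketch only (not part of bzrlib): pulling new revisions
    # from a parent branch into this tree, as 'bzr pull' does through pull()
    # above.  The URL is hypothetical.
    def _example_pull_from_parent(self):
        from bzrlib.branch import Branch
        parent = Branch.open('http://example.com/trunk')
        result = self.pull(parent, overwrite=False)
        note('pull finished: %s', result)
        return result
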
            raise errors.ObjectNotLocked(self)

    def lock_read(self):
        """Lock the tree for reading.

        This also locks the branch, and can be unlocked via self.unlock().

        :return: A bzrlib.lock.LogicalLockResult.
        """
        if not self.is_locked():
            self._reset_data()
        self.branch.lock_read()
        try:
            self._control_files.lock_read()
            return LogicalLockResult(self.unlock)
        except:
            self.branch.unlock()
            raise

    def lock_tree_write(self):
        """See MutableTree.lock_tree_write, and WorkingTree.unlock.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        if not self.is_locked():
            self._reset_data()
        self.branch.lock_read()
        try:
            self._control_files.lock_write()
            return LogicalLockResult(self.unlock)
        except:
            self.branch.unlock()
            raise

    def lock_write(self):
        """See MutableTree.lock_write, and WorkingTree.unlock.

        :return: A bzrlib.lock.LogicalLockResult.
        """
        if not self.is_locked():
            self._reset_data()
        self.branch.lock_write()
        try:
            self._control_files.lock_write()
            return LogicalLockResult(self.unlock)
        except:
            self.branch.unlock()
            raise
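
    # Illustrative sketch only (not part of bzrlib): the usual pairing of the
    # lock methods above with unlock() in a try/finally, which is how callers
    # are expected to use them.  The helper name is hypothetical.
    def _example_read_parent_ids_locked(self):
        self.lock_read()
        try:
            return self.get_parent_ids()
        finally:
            self.unlock()
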
        # as commit already has that ready-to-use [while the format is the
        # same, that is].
        try:
            # this double handles the inventory - unpack and repack -
            # but is easier to understand. We can/should put a conditional
            # in here based on whether the inventory is in the latest format
            # - perhaps we should repack all inventories on a repository
            # upgrade ?
            # the fast path is to copy the raw xml from the repository. If the
            # xml contains 'revision_id="', then we assume the right
            # revision_id is set. We must check for this full string, because a
            # root node id can legitimately look like 'revision_id' but cannot
            # contain a '"'.
            xml = self.branch.repository._get_inventory_xml(new_revision)
            firstline = xml.split('\n', 1)[0]
            if (not 'revision_id="' in firstline or
                'format="7"' not in firstline):
                inv = self.branch.repository._serializer.read_inventory_from_string(
                    xml, new_revision)
                xml = self._create_basis_xml_from_inventory(new_revision, inv)
            self._write_basis_inventory(xml)
        except (errors.NoSuchRevision, errors.RevisionNotPresent):
            pass

    def read_basis_inventory(self):
        """Read the cached basis inventory."""
        path = self._basis_inventory_name()
        return self._transport.get_bytes(path)

    @needs_read_lock
    def read_working_inventory(self):
        """Read the working inventory.

        :raises errors.InventoryModified: read_working_inventory will fail
            when the current in memory inventory has been modified.
        """
        # conceptually this should be an implementation detail of the tree.
        # XXX: Deprecate this.
        # ElementTree does its own conversion from UTF-8, so open in
        # binary.
        if self._inventory_is_modified:
            raise errors.InventoryModified(self)
        f = self._transport.get('inventory')
        try:
            result = self._deserialize(f)
        finally:
            f.close()
        self._set_inventory(result, dirty=False)

    def unlock(self):
        """See Branch.unlock.

        WorkingTree locking just uses the Branch locking facilities.
        This is currently the case because all working trees have an embedded
        branch within them. If, in the future, we were to make branch data
        shareable between multiple working trees, i.e. via shared storage, then
        we would probably want to lock both the local tree, and the branch.
        """
        raise NotImplementedError(self.unlock)

    def update(self, change_reporter=None, possible_transports=None,
               revision=None, old_tip=_marker):
        """Update a working tree along its branch.

        This will update the branch if it is bound too, which means we have
        multiple trees involved.
        """
        # can't set that until we update the working tree's last revision to be
        # one from the new branch, because it will just get absorbed by the
        # parent de-duplication logic.
        #
        # We MUST save it even if an error occurs, because otherwise the users
        # local work is unreferenced and will appear to have been lost.
        #
        nb_conflicts = 0
        try:
            last_rev = self.get_parent_ids()[0]
        except IndexError:
            last_rev = _mod_revision.NULL_REVISION
        if revision is None:
            revision = self.branch.last_revision()

        old_tip = old_tip or _mod_revision.NULL_REVISION

        if not _mod_revision.is_null(old_tip) and old_tip != last_rev:
            # the branch we are bound to was updated
            # merge those changes in first
            base_tree = self.basis_tree()
            other_tree = self.branch.repository.revision_tree(old_tip)
            nb_conflicts = merge.merge_inner(self.branch, other_tree,
                                             base_tree, this_tree=self,
                                             change_reporter=change_reporter)
            if nb_conflicts:
                self.add_parent_tree((old_tip, other_tree))
                trace.note('Rerun update after fixing the conflicts.')
                return nb_conflicts

        if last_rev != _mod_revision.ensure_null(revision):
            # the working tree is up to date with the branch
            # we can merge the specified revision from master
            to_tree = self.branch.repository.revision_tree(revision)
            to_root_id = to_tree.get_root_id()

            basis = self.basis_tree()
            basis.lock_read()
            try:
                if (basis.inventory.root is None
                    or basis.inventory.root.file_id != to_root_id):
                    self.set_root_id(to_root_id)
            finally:
                basis.unlock()

            # determine the branch point
            graph = self.branch.repository.get_graph()
            base_rev_id = graph.find_unique_lca(self.branch.last_revision(),
                                                last_rev)
            base_tree = self.branch.repository.revision_tree(base_rev_id)

            nb_conflicts = merge.merge_inner(self.branch, to_tree, base_tree,
                                             this_tree=self,
                                             change_reporter=change_reporter)
            self.set_last_revision(revision)
            # TODO - dedup parents list with things merged by pull ?
            # reuse the tree we've updated to to set the basis:
            parent_trees = [(revision, to_tree)]
            merges = self.get_parent_ids()[1:]
            # Ideally we ask the tree for the trees here, that way the working
            # tree can decide whether to give us the entire tree or give us a
            # lazy initialised tree. dirstate for instance will have the trees
            # in ram already, whereas a last-revision + basis-inventory tree
            # will not, but also does not need them when setting parents.
            for parent in merges:
                parent_trees.append(
                    (parent, self.branch.repository.revision_tree(parent)))
            if not _mod_revision.is_null(old_tip):
                parent_trees.append(
                    (old_tip, self.branch.repository.revision_tree(old_tip)))
            self.set_parent_trees(parent_trees)
            last_rev = parent_trees[0][0]
        return nb_conflicts
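
    # Illustrative sketch only (not part of bzrlib): bringing a checkout up to
    # date with its branch, as 'bzr update' does through update() above.  A
    # non-zero return value means conflicts were left for the user to resolve.
    def _example_update_checkout(self):
        conflicts = self.update()
        if conflicts:
            note('update left conflicts to resolve; rerun after fixing them')
        return conflicts
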
    def _write_hashcache_if_dirty(self):
        """Write out the hashcache if it is dirty."""