1
# Copyright (C) 2005, 2006, 2008 Canonical Ltd
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
# GNU General Public License for more details.
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
from itertools import chain
30
revision as _mod_revision,
34
from bzrlib.branch import Branch
35
from bzrlib.conflicts import ConflictList, Conflict
36
from bzrlib.errors import (BzrCommandError,
46
WorkingTreeNotRevision,
49
from bzrlib.graph import Graph
50
from bzrlib.merge3 import Merge3
51
from bzrlib.osutils import rename, pathjoin
52
from progress import DummyProgress, ProgressPhase
53
from bzrlib.revision import (NULL_REVISION, ensure_null)
54
from bzrlib.textfile import check_text_lines
55
from bzrlib.trace import mutter, warning, note, is_quiet
56
from bzrlib.transform import (TransformPreview, TreeTransform,
57
resolve_conflicts, cook_conflicts,
58
conflict_pass, FinalPaths, create_by_entry,
59
unique_add, ROOT_PARENT)
60
from bzrlib.versionedfile import PlanWeaveMerge
63
# TODO: Report back as changes are merged in
66
def transform_tree(from_tree, to_tree, interesting_ids=None):
    """Apply from_tree's content to to_tree via an in-place merge.

    NOTE(review): merge_inner is defined elsewhere in this module; its
    exact semantics are not visible here, but it is invoked with
    from_tree as both OTHER and THIS, which effectively replays
    from_tree's state onto to_tree.

    :param from_tree: The tree supplying the desired content
    :param to_tree: The working tree to modify
    :param interesting_ids: If supplied, restrict the operation to these
        file ids
    """
    merge_inner(from_tree.branch, to_tree, from_tree, ignore_zero=True,
                interesting_ids=interesting_ids, this_tree=from_tree)
72
def __init__(self, this_branch, other_tree=None, base_tree=None,
73
this_tree=None, pb=DummyProgress(), change_reporter=None,
74
recurse='down', revision_graph=None):
76
self.this_branch = this_branch
77
self.this_basis = _mod_revision.ensure_null(
78
this_branch.last_revision())
79
self.this_rev_id = None
80
self.this_tree = this_tree
81
self.this_revision_tree = None
82
self.this_basis_tree = None
83
self.other_tree = other_tree
84
self.other_branch = None
85
self.base_tree = base_tree
86
self.ignore_zero = False
87
self.backup_files = False
88
self.interesting_ids = None
89
self.interesting_files = None
90
self.show_base = False
91
self.reprocess = False
94
self.recurse = recurse
95
self.change_reporter = change_reporter
96
self._cached_trees = {}
97
self._revision_graph = revision_graph
98
self._base_is_ancestor = None
99
self._base_is_other_ancestor = None
100
self._is_criss_cross = False
101
self._lca_trees = None
104
def revision_graph(self):
    """Return the revision graph, computing it lazily on first access.

    When no precomputed graph was handed to the constructor, fall back
    to the graph of this_branch's repository and cache it.
    """
    if self._revision_graph is None:
        self._revision_graph = self.this_branch.repository.get_graph()
    return self._revision_graph
109
def _set_base_is_ancestor(self, value):
110
self._base_is_ancestor = value
112
def _get_base_is_ancestor(self):
113
if self._base_is_ancestor is None:
114
self._base_is_ancestor = self.revision_graph.is_ancestor(
115
self.base_rev_id, self.this_basis)
116
return self._base_is_ancestor
118
base_is_ancestor = property(_get_base_is_ancestor, _set_base_is_ancestor)
120
def _set_base_is_other_ancestor(self, value):
121
self._base_is_other_ancestor = value
123
def _get_base_is_other_ancestor(self):
124
if self._base_is_other_ancestor is None:
125
if self.other_basis is None:
127
self._base_is_other_ancestor = self.revision_graph.is_ancestor(
128
self.base_rev_id, self.other_basis)
129
return self._base_is_other_ancestor
131
base_is_other_ancestor = property(_get_base_is_other_ancestor,
132
_set_base_is_other_ancestor)
135
def from_uncommitted(tree, other_tree, pb):
136
"""Return a Merger for uncommitted changes in other_tree.
138
:param tree: The tree to merge into
139
:param other_tree: The tree to get uncommitted changes from
140
:param pb: A progress indicator
142
merger = Merger(tree.branch, other_tree, other_tree.basis_tree(), tree,
144
merger.base_rev_id = merger.base_tree.get_revision_id()
145
merger.other_rev_id = None
146
merger.other_basis = merger.base_rev_id
150
def from_mergeable(klass, tree, mergeable, pb):
151
"""Return a Merger for a bundle or merge directive.
153
:param tree: The tree to merge changes into
154
:param mergeable: A merge directive or bundle
155
:param pb: A progress indicator
157
mergeable.install_revisions(tree.branch.repository)
158
base_revision_id, other_revision_id, verified =\
159
mergeable.get_merge_request(tree.branch.repository)
160
revision_graph = tree.branch.repository.get_graph()
161
if base_revision_id is not None:
162
if (base_revision_id != _mod_revision.NULL_REVISION and
163
revision_graph.is_ancestor(
164
base_revision_id, tree.branch.last_revision())):
165
base_revision_id = None
167
warning('Performing cherrypick')
168
merger = klass.from_revision_ids(pb, tree, other_revision_id,
169
base_revision_id, revision_graph=
171
return merger, verified
174
def from_revision_ids(pb, tree, other, base=None, other_branch=None,
175
base_branch=None, revision_graph=None):
176
"""Return a Merger for revision-ids.
178
:param tree: The tree to merge changes into
179
:param other: The revision-id to use as OTHER
180
:param base: The revision-id to use as BASE. If not specified, will
182
:param other_branch: A branch containing the other revision-id. If
183
not supplied, tree.branch is used.
184
:param base_branch: A branch containing the base revision-id. If
185
not supplied, other_branch or tree.branch will be used.
186
:param revision_graph: If you have a revision_graph precomputed, pass
187
it in, otherwise it will be created for you.
188
:param pb: A progress indicator
190
merger = Merger(tree.branch, this_tree=tree, pb=pb,
191
revision_graph=revision_graph)
192
if other_branch is None:
193
other_branch = tree.branch
194
merger.set_other_revision(other, other_branch)
198
if base_branch is None:
199
base_branch = other_branch
200
merger.set_base_revision(base, base_branch)
203
def revision_tree(self, revision_id, branch=None):
204
if revision_id not in self._cached_trees:
206
branch = self.this_branch
208
tree = self.this_tree.revision_tree(revision_id)
209
except errors.NoSuchRevisionInTree:
210
tree = branch.repository.revision_tree(revision_id)
211
self._cached_trees[revision_id] = tree
212
return self._cached_trees[revision_id]
214
def _get_tree(self, treespec, possible_transports=None):
215
from bzrlib import workingtree
216
location, revno = treespec
218
tree = workingtree.WorkingTree.open_containing(location)[0]
219
return tree.branch, tree
220
branch = Branch.open_containing(location, possible_transports)[0]
222
revision_id = branch.last_revision()
224
revision_id = branch.get_rev_id(revno)
225
revision_id = ensure_null(revision_id)
226
return branch, self.revision_tree(revision_id, branch)
228
def ensure_revision_trees(self):
229
if self.this_revision_tree is None:
230
self.this_basis_tree = self.revision_tree(self.this_basis)
231
if self.this_basis == self.this_rev_id:
232
self.this_revision_tree = self.this_basis_tree
234
if self.other_rev_id is None:
235
other_basis_tree = self.revision_tree(self.other_basis)
236
changes = other_basis_tree.changes_from(self.other_tree)
237
if changes.has_changed():
238
raise WorkingTreeNotRevision(self.this_tree)
239
other_rev_id = self.other_basis
240
self.other_tree = other_basis_tree
242
def file_revisions(self, file_id):
243
self.ensure_revision_trees()
244
def get_id(tree, file_id):
245
revision_id = tree.inventory[file_id].revision
247
if self.this_rev_id is None:
248
if self.this_basis_tree.get_file_sha1(file_id) != \
249
self.this_tree.get_file_sha1(file_id):
250
raise WorkingTreeNotRevision(self.this_tree)
252
trees = (self.this_basis_tree, self.other_tree)
253
return [get_id(tree, file_id) for tree in trees]
255
def check_basis(self, check_clean, require_commits=True):
256
if self.this_basis is None and require_commits is True:
257
raise BzrCommandError("This branch has no commits."
258
" (perhaps you would prefer 'bzr pull')")
261
if self.this_basis != self.this_rev_id:
262
raise errors.UncommittedChanges(self.this_tree)
264
def compare_basis(self):
266
basis_tree = self.revision_tree(self.this_tree.last_revision())
267
except errors.NoSuchRevision:
268
basis_tree = self.this_tree.basis_tree()
269
changes = self.this_tree.changes_from(basis_tree)
270
if not changes.has_changed():
271
self.this_rev_id = self.this_basis
273
def set_interesting_files(self, file_list):
    """Record the tree-relative paths the merge should be limited to.

    :param file_list: A list of paths, or None for no restriction.
    """
    self.interesting_files = file_list
276
def set_pending(self):
277
if not self.base_is_ancestor or not self.base_is_other_ancestor or self.other_rev_id is None:
281
def _add_parent(self):
282
new_parents = self.this_tree.get_parent_ids() + [self.other_rev_id]
283
new_parent_trees = []
284
for revision_id in new_parents:
286
tree = self.revision_tree(revision_id)
287
except errors.NoSuchRevision:
291
new_parent_trees.append((revision_id, tree))
293
self.this_tree.set_parent_trees(new_parent_trees,
294
allow_leftmost_as_ghost=True)
296
for _revision_id, tree in new_parent_trees:
300
def set_other(self, other_revision, possible_transports=None):
301
"""Set the revision and tree to merge from.
303
This sets the other_tree, other_rev_id, other_basis attributes.
305
:param other_revision: The [path, revision] list to merge from.
307
self.other_branch, self.other_tree = self._get_tree(other_revision,
309
if other_revision[1] == -1:
310
self.other_rev_id = _mod_revision.ensure_null(
311
self.other_branch.last_revision())
312
if _mod_revision.is_null(self.other_rev_id):
313
raise NoCommits(self.other_branch)
314
self.other_basis = self.other_rev_id
315
elif other_revision[1] is not None:
316
self.other_rev_id = self.other_branch.get_rev_id(other_revision[1])
317
self.other_basis = self.other_rev_id
319
self.other_rev_id = None
320
self.other_basis = self.other_branch.last_revision()
321
if self.other_basis is None:
322
raise NoCommits(self.other_branch)
323
if self.other_rev_id is not None:
324
self._cached_trees[self.other_rev_id] = self.other_tree
325
self._maybe_fetch(self.other_branch,self.this_branch, self.other_basis)
327
def set_other_revision(self, revision_id, other_branch):
    """Set 'other' based on a branch and revision id.

    :param revision_id: The revision to use for a tree
    :param other_branch: The branch containing this tree
    """
    self.other_rev_id = revision_id
    self.other_branch = other_branch
    # Fetch before building the tree so the revision is present in
    # this_branch's repository.
    self._maybe_fetch(other_branch, self.this_branch, self.other_rev_id)
    self.other_tree = self.revision_tree(revision_id)
    self.other_basis = revision_id
339
def set_base_revision(self, revision_id, branch):
    """Set 'base' based on a branch and revision id.

    :param revision_id: The revision to use for a tree
    :param branch: The branch containing this tree
    """
    self.base_rev_id = revision_id
    self.base_branch = branch
    # Fetch first so revision_tree() can resolve the revision locally.
    self._maybe_fetch(branch, self.this_branch, revision_id)
    self.base_tree = self.revision_tree(revision_id)
350
def _maybe_fetch(self, source, target, revision_id):
351
if not source.repository.has_same_location(target.repository):
352
target.fetch(source, revision_id)
355
revisions = [ensure_null(self.this_basis),
356
ensure_null(self.other_basis)]
357
if NULL_REVISION in revisions:
358
self.base_rev_id = NULL_REVISION
360
lcas = self.revision_graph.find_lca(revisions[0], revisions[1])
362
self.base_rev_id = NULL_REVISION
364
self.base_rev_id = list(lcas)[0]
365
else: # len(lcas) > 1
366
self.base_rev_id = self.revision_graph.find_unique_lca(
368
self._is_criss_cross = True
369
if self.base_rev_id == NULL_REVISION:
370
raise UnrelatedBranches()
371
if self._is_criss_cross:
372
warning('Warning: criss-cross merge encountered. See bzr'
373
' help criss-cross.')
374
interesting_revision_ids = [self.base_rev_id]
375
interesting_revision_ids.extend(lcas)
376
interesting_trees = dict((t.get_revision_id(), t)
377
for t in self.this_branch.repository.revision_trees(
378
interesting_revision_ids))
379
self._cached_trees.update(interesting_trees)
380
self.base_tree = interesting_trees.pop(self.base_rev_id)
381
sorted_lca_keys = self.revision_graph.find_merge_order(
383
self._lca_trees = [interesting_trees[key]
384
for key in sorted_lca_keys]
386
self.base_tree = self.revision_tree(self.base_rev_id)
387
self.base_is_ancestor = True
388
self.base_is_other_ancestor = True
390
def set_base(self, base_revision):
391
"""Set the base revision to use for the merge.
393
:param base_revision: A 2-list containing a path and revision number.
395
mutter("doing merge() with no base_revision specified")
396
if base_revision == [None, None]:
399
base_branch, self.base_tree = self._get_tree(base_revision)
400
if base_revision[1] == -1:
401
self.base_rev_id = base_branch.last_revision()
402
elif base_revision[1] is None:
403
self.base_rev_id = _mod_revision.NULL_REVISION
405
self.base_rev_id = _mod_revision.ensure_null(
406
base_branch.get_rev_id(base_revision[1]))
407
self._maybe_fetch(base_branch, self.this_branch, self.base_rev_id)
409
def make_merger(self):
410
kwargs = {'working_tree':self.this_tree, 'this_tree': self.this_tree,
411
'other_tree': self.other_tree,
412
'interesting_ids': self.interesting_ids,
413
'interesting_files': self.interesting_files,
416
if self.merge_type.requires_base:
417
kwargs['base_tree'] = self.base_tree
418
if self.merge_type.supports_reprocess:
419
kwargs['reprocess'] = self.reprocess
421
raise BzrError("Conflict reduction is not supported for merge"
422
" type %s." % self.merge_type)
423
if self.merge_type.supports_show_base:
424
kwargs['show_base'] = self.show_base
426
raise BzrError("Showing base is not supported for this"
427
" merge type. %s" % self.merge_type)
428
if (not getattr(self.merge_type, 'supports_reverse_cherrypick', True)
429
and not self.base_is_other_ancestor):
430
raise errors.CannotReverseCherrypick()
431
if self.merge_type.supports_cherrypick:
432
kwargs['cherrypick'] = (not self.base_is_ancestor or
433
not self.base_is_other_ancestor)
434
if self._is_criss_cross and getattr(self.merge_type,
435
'supports_lca_trees', False):
436
kwargs['lca_trees'] = self._lca_trees
437
return self.merge_type(pb=self._pb,
438
change_reporter=self.change_reporter,
442
self.this_tree.lock_tree_write()
443
if self.base_tree is not None:
444
self.base_tree.lock_read()
445
if self.other_tree is not None:
446
self.other_tree.lock_read()
448
merge = self.make_merger()
450
if self.recurse == 'down':
451
for relpath, file_id in self.this_tree.iter_references():
452
sub_tree = self.this_tree.get_nested_tree(file_id, relpath)
453
other_revision = self.other_tree.get_reference_revision(
455
if other_revision == sub_tree.last_revision():
457
sub_merge = Merger(sub_tree.branch, this_tree=sub_tree)
458
sub_merge.merge_type = self.merge_type
459
other_branch = self.other_branch.reference_parent(file_id, relpath)
460
sub_merge.set_other_revision(other_revision, other_branch)
461
base_revision = self.base_tree.get_reference_revision(file_id)
462
sub_merge.base_tree = \
463
sub_tree.branch.repository.revision_tree(base_revision)
464
sub_merge.base_rev_id = base_revision
468
if self.other_tree is not None:
469
self.other_tree.unlock()
470
if self.base_tree is not None:
471
self.base_tree.unlock()
472
self.this_tree.unlock()
473
if len(merge.cooked_conflicts) == 0:
474
if not self.ignore_zero and not is_quiet():
475
note("All changes applied successfully.")
477
note("%d conflicts encountered." % len(merge.cooked_conflicts))
479
return len(merge.cooked_conflicts)
482
class Merge3Merger(object):
483
"""Three-way merger that uses the merge3 text merger"""
485
supports_reprocess = True
486
supports_show_base = True
487
history_based = False
488
supports_cherrypick = True
489
supports_reverse_cherrypick = True
490
winner_idx = {"this": 2, "other": 1, "conflict": 1}
491
supports_lca_trees = True
493
def __init__(self, working_tree, this_tree, base_tree, other_tree,
494
interesting_ids=None, reprocess=False, show_base=False,
495
pb=DummyProgress(), pp=None, change_reporter=None,
496
interesting_files=None, do_merge=True,
497
cherrypick=False, lca_trees=None):
498
"""Initialize the merger object and perform the merge.
500
:param working_tree: The working tree to apply the merge to
501
:param this_tree: The local tree in the merge operation
502
:param base_tree: The common tree in the merge operation
503
:param other_tree: The other other tree to merge changes from
504
:param interesting_ids: The file_ids of files that should be
505
participate in the merge. May not be combined with
507
:param: reprocess If True, perform conflict-reduction processing.
508
:param show_base: If True, show the base revision in text conflicts.
509
(incompatible with reprocess)
510
:param pb: A Progress bar
511
:param pp: A ProgressPhase object
512
:param change_reporter: An object that should report changes made
513
:param interesting_files: The tree-relative paths of files that should
514
participate in the merge. If these paths refer to directories,
515
the contents of those directories will also be included. May not
516
be combined with interesting_ids. If neither interesting_files nor
517
interesting_ids is specified, all files may participate in the
519
:param lca_trees: Can be set to a dictionary of {revision_id:rev_tree}
520
if the ancestry was found to include a criss-cross merge.
521
Otherwise should be None.
523
object.__init__(self)
524
if interesting_files is not None and interesting_ids is not None:
526
'specify either interesting_ids or interesting_files')
527
self.interesting_ids = interesting_ids
528
self.interesting_files = interesting_files
529
self.this_tree = working_tree
530
self.base_tree = base_tree
531
self.other_tree = other_tree
532
self._raw_conflicts = []
533
self.cooked_conflicts = []
534
self.reprocess = reprocess
535
self.show_base = show_base
536
self._lca_trees = lca_trees
539
self.change_reporter = change_reporter
540
self.cherrypick = cherrypick
542
self.pp = ProgressPhase("Merge phase", 3, self.pb)
547
self.this_tree.lock_tree_write()
548
self.base_tree.lock_read()
549
self.other_tree.lock_read()
550
self.tt = TreeTransform(self.this_tree, self.pb)
553
self._compute_transform()
555
results = self.tt.apply(no_conflicts=True)
556
self.write_modified(results)
558
self.this_tree.add_conflicts(self.cooked_conflicts)
559
except UnsupportedOperation:
563
self.other_tree.unlock()
564
self.base_tree.unlock()
565
self.this_tree.unlock()
568
def make_preview_transform(self):
569
self.base_tree.lock_read()
570
self.other_tree.lock_read()
571
self.tt = TransformPreview(self.this_tree)
574
self._compute_transform()
577
self.other_tree.unlock()
578
self.base_tree.unlock()
582
def _compute_transform(self):
583
entries = self._entries3()
584
child_pb = ui.ui_factory.nested_progress_bar()
586
for num, (file_id, changed, parents3, names3,
587
executable3) in enumerate(entries):
588
child_pb.update('Preparing file merge', num, len(entries))
589
self._merge_names(file_id, parents3, names3)
591
file_status = self.merge_contents(file_id)
593
file_status = 'unmodified'
594
self._merge_executable(file_id,
595
executable3, file_status)
600
child_pb = ui.ui_factory.nested_progress_bar()
602
fs_conflicts = resolve_conflicts(self.tt, child_pb,
603
lambda t, c: conflict_pass(t, c, self.other_tree))
606
if self.change_reporter is not None:
607
from bzrlib import delta
608
delta.report_changes(
609
self.tt.iter_changes(), self.change_reporter)
610
self.cook_conflicts(fs_conflicts)
611
for conflict in self.cooked_conflicts:
615
"""Gather data about files modified between three trees.
617
Return a list of tuples of file_id, changed, parents3, names3,
618
executable3. changed is a boolean indicating whether the file contents
619
or kind were changed. parents3 is a tuple of parent ids for base,
620
other and this. names3 is a tuple of names for base, other and this.
621
executable3 is a tuple of execute-bit values for base, other and this.
624
iterator = self.other_tree.iter_changes(self.base_tree,
625
include_unchanged=True, specific_files=self.interesting_files,
626
extra_trees=[self.this_tree])
627
for (file_id, paths, changed, versioned, parents, names, kind,
628
executable) in iterator:
629
if (self.interesting_ids is not None and
630
file_id not in self.interesting_ids):
632
if file_id in self.this_tree.inventory:
633
entry = self.this_tree.inventory[file_id]
634
this_name = entry.name
635
this_parent = entry.parent_id
636
this_executable = entry.executable
640
this_executable = None
641
parents3 = parents + (this_parent,)
642
names3 = names + (this_name,)
643
executable3 = executable + (this_executable,)
644
result.append((file_id, changed, parents3, names3, executable3))
647
def _entries_lca(self):
648
"""Gather data about files modified between multiple trees.
650
This compares OTHER versus all LCA trees, and for interesting entries,
651
it then compares with THIS and BASE.
653
For the multi-valued entries, the format will be (BASE, [lca1, lca2])
654
:return: [(file_id, changed, parents, names, executable)]
655
file_id Simple file_id of the entry
656
changed Boolean, True if the kind or contents changed
658
parents ((base, [parent_id, in, lcas]), parent_id_other,
660
names ((base, [name, in, lcas]), name_in_other, name_in_this)
661
executable ((base, [exec, in, lcas]), exec_in_other, exec_in_this)
664
# XXX: Do we want a better sort order than this?
665
walker = _mod_tree.MultiWalker(self.other_tree, self._lca_trees)
667
base_inventory = self.base_tree.inventory
668
this_inventory = self.this_tree.inventory
669
for path, file_id, other_ie, lca_values in walker.iter_all():
670
# Is this modified at all from any of the other trees?
672
last_rev = other_kind = other_parent_id = other_name = None
673
other_executable = None
675
last_rev = other_ie.revision
676
other_kind = other_ie.kind
677
other_parent_id = other_ie.parent_id
678
other_name = other_ie.name
679
other_executable = other_ie.executable
681
# I believe we can actually change this to see if last_rev is
682
# identical to *any* of the lca values.
683
for lca_path, ie in lca_values:
684
if ((ie is None and other_ie is not None)
685
or ie.revision != last_rev):
687
else: # Identical in all trees
690
parent_id_changed = False
692
for lca_path, ie in lca_values:
693
if ie is None and other_ie is not None:
694
kind_changed = parent_id_changed = name_changed = True
696
if ie.kind != other_kind:
698
if ie.parent_id != other_parent_id:
699
parent_id_changed = True
700
if ie.name != other_name:
703
if (not kind_changed and not parent_id_changed
704
and not name_changed and other_kind == 'directory'):
705
# Even though last-modified has changed, the actual attributes
706
# of this entry hasn't changed, so skip it.
709
if file_id in base_inventory:
710
base_ie = self.base_tree.inventory[file_id]
711
base_parent_id = base_ie.parent_id
712
base_name = base_ie.name
713
base_executable = base_ie.executable
715
base_parent_id = base_name = base_executable = None
716
if file_id in this_inventory:
717
this_ie = this_inventory[file_id]
718
this_parent_id = this_ie.parent_id
719
this_name = this_ie.name
720
this_executable = this_ie.executable
722
this_parent_id = this_name = this_executable = None
727
for path, ie in lca_values:
729
lca_parent_ids.append(None)
730
lca_names.append(None)
731
lca_executable.append(None)
733
lca_parent_ids.append(ie.parent_id)
734
lca_names.append(ie.name)
735
lca_executable.append(ie.executable)
737
# If we have gotten this far, that means something has changed
738
result.append((file_id, True,
739
((base_parent_id, lca_parent_ids),
740
other_parent_id, this_parent_id),
741
((base_name, lca_names),
742
other_name, this_name),
743
((base_executable, lca_executable),
744
other_executable, this_executable)
751
self.tt.final_kind(self.tt.root)
753
self.tt.cancel_deletion(self.tt.root)
754
if self.tt.final_file_id(self.tt.root) is None:
755
self.tt.version_file(self.tt.tree_file_id(self.tt.root),
757
if self.other_tree.inventory.root is None:
759
other_root_file_id = self.other_tree.get_root_id()
760
other_root = self.tt.trans_id_file_id(other_root_file_id)
761
if other_root == self.tt.root:
764
self.tt.final_kind(other_root)
767
self.reparent_children(self.other_tree.inventory.root, self.tt.root)
768
self.tt.cancel_creation(other_root)
769
self.tt.cancel_versioning(other_root)
771
def reparent_children(self, ie, target):
    """Move every child of inventory entry ie under the target trans id.

    Each child keeps its already-decided final name; only its parent in
    the transform changes.
    """
    # iteritems: this module is Python 2 era code.
    for name, child in ie.children.iteritems():
        trans_id = self.tt.trans_id_file_id(child.file_id)
        self.tt.adjust_path(self.tt.final_name(trans_id), target, trans_id)
776
def write_modified(self, results):
778
for path in results.modified_paths:
779
file_id = self.this_tree.path2id(self.this_tree.relpath(path))
782
hash = self.this_tree.get_file_sha1(file_id)
785
modified_hashes[file_id] = hash
786
self.this_tree.set_merge_modified(modified_hashes)
789
def parent(entry, file_id):
790
"""Determine the parent for a file_id (used as a key method)"""
793
return entry.parent_id
796
def name(entry, file_id):
797
"""Determine the name for a file_id (used as a key method)"""
803
def contents_sha1(tree, file_id):
804
"""Determine the sha1 of the file contents (used as a key method)."""
805
if file_id not in tree:
807
return tree.get_file_sha1(file_id)
810
def executable(tree, file_id):
811
"""Determine the executability of a file-id (used as a key method)."""
812
if file_id not in tree:
814
if tree.kind(file_id) != "file":
816
return tree.is_executable(file_id)
819
def kind(tree, file_id):
820
"""Determine the kind of a file-id (used as a key method)."""
821
if file_id not in tree:
823
return tree.kind(file_id)
826
def _three_way(base, other, this):
827
#if base == other, either they all agree, or only THIS has changed.
830
elif this not in (base, other):
832
# "Ambiguous clean merge" -- both sides have made the same change.
835
# this == base: only other has changed.
840
def scalar_three_way(this_tree, base_tree, other_tree, file_id, key):
841
"""Do a three-way test on a scalar.
842
Return "this", "other" or "conflict", depending whether a value wins.
844
key_base = key(base_tree, file_id)
845
key_other = key(other_tree, file_id)
846
#if base == other, either they all agree, or only THIS has changed.
847
if key_base == key_other:
849
key_this = key(this_tree, file_id)
850
# "Ambiguous clean merge"
851
if key_this == key_other:
853
elif key_this == key_base:
858
def merge_names(self, file_id):
860
if file_id in tree.inventory:
861
return tree.inventory[file_id]
864
this_entry = get_entry(self.this_tree)
865
other_entry = get_entry(self.other_tree)
866
base_entry = get_entry(self.base_tree)
867
entries = (base_entry, other_entry, this_entry)
870
for entry in entries:
875
names.append(entry.name)
876
parents.append(entry.parent_id)
877
return self._merge_names(file_id, parents, names)
879
def _merge_names(self, file_id, parents, names):
880
"""Perform a merge on file_id names and parents"""
881
base_name, other_name, this_name = names
882
base_parent, other_parent, this_parent = parents
884
name_winner = self._three_way(*names)
886
parent_id_winner = self._three_way(*parents)
887
if this_name is None:
888
if name_winner == "this":
889
name_winner = "other"
890
if parent_id_winner == "this":
891
parent_id_winner = "other"
892
if name_winner == "this" and parent_id_winner == "this":
894
if name_winner == "conflict":
895
trans_id = self.tt.trans_id_file_id(file_id)
896
self._raw_conflicts.append(('name conflict', trans_id,
897
this_name, other_name))
898
if parent_id_winner == "conflict":
899
trans_id = self.tt.trans_id_file_id(file_id)
900
self._raw_conflicts.append(('parent conflict', trans_id,
901
this_parent, other_parent))
902
if other_name is None:
903
# it doesn't matter whether the result was 'other' or
904
# 'conflict'-- if there's no 'other', we leave it alone.
906
# if we get here, name_winner and parent_winner are set to safe values.
907
trans_id = self.tt.trans_id_file_id(file_id)
908
parent_id = parents[self.winner_idx[parent_id_winner]]
909
if parent_id is not None:
910
parent_trans_id = self.tt.trans_id_file_id(parent_id)
911
self.tt.adjust_path(names[self.winner_idx[name_winner]],
912
parent_trans_id, trans_id)
914
def merge_contents(self, file_id):
915
"""Performa a merge on file_id contents."""
916
def contents_pair(tree):
917
if file_id not in tree:
919
kind = tree.kind(file_id)
921
contents = tree.get_file_sha1(file_id)
922
elif kind == "symlink":
923
contents = tree.get_symlink_target(file_id)
926
return kind, contents
928
def contents_conflict():
929
trans_id = self.tt.trans_id_file_id(file_id)
930
name = self.tt.final_name(trans_id)
931
parent_id = self.tt.final_parent(trans_id)
932
if file_id in self.this_tree.inventory:
933
self.tt.unversion_file(trans_id)
934
if file_id in self.this_tree:
935
self.tt.delete_contents(trans_id)
936
file_group = self._dump_conflicts(name, parent_id, file_id,
938
self._raw_conflicts.append(('contents conflict', file_group))
940
# See SPOT run. run, SPOT, run.
941
# So we're not QUITE repeating ourselves; we do tricky things with
943
base_pair = contents_pair(self.base_tree)
944
other_pair = contents_pair(self.other_tree)
945
if base_pair == other_pair:
946
# OTHER introduced no changes
948
this_pair = contents_pair(self.this_tree)
949
if this_pair == other_pair:
950
# THIS and OTHER introduced the same changes
953
trans_id = self.tt.trans_id_file_id(file_id)
954
if this_pair == base_pair:
955
# only OTHER introduced changes
956
if file_id in self.this_tree:
957
# Remove any existing contents
958
self.tt.delete_contents(trans_id)
959
if file_id in self.other_tree:
960
# OTHER changed the file
961
create_by_entry(self.tt,
962
self.other_tree.inventory[file_id],
963
self.other_tree, trans_id)
964
if file_id not in self.this_tree.inventory:
965
self.tt.version_file(file_id, trans_id)
967
elif file_id in self.this_tree.inventory:
968
# OTHER deleted the file
969
self.tt.unversion_file(trans_id)
971
#BOTH THIS and OTHER introduced changes; scalar conflict
972
elif this_pair[0] == "file" and other_pair[0] == "file":
973
# THIS and OTHER are both files, so text merge. Either
974
# BASE is a file, or both converted to files, so at least we
975
# have agreement that output should be a file.
977
self.text_merge(file_id, trans_id)
979
return contents_conflict()
980
if file_id not in self.this_tree.inventory:
981
self.tt.version_file(file_id, trans_id)
983
self.tt.tree_kind(trans_id)
984
self.tt.delete_contents(trans_id)
989
# Scalar conflict, can't text merge. Dump conflicts
990
return contents_conflict()
992
def get_lines(self, tree, file_id):
993
"""Return the lines in a file, or an empty list."""
995
return tree.get_file(file_id).readlines()
999
def text_merge(self, file_id, trans_id):
1000
"""Perform a three-way text merge on a file_id"""
1001
# it's possible that we got here with base as a different type.
1002
# if so, we just want two-way text conflicts.
1003
if file_id in self.base_tree and \
1004
self.base_tree.kind(file_id) == "file":
1005
base_lines = self.get_lines(self.base_tree, file_id)
1008
other_lines = self.get_lines(self.other_tree, file_id)
1009
this_lines = self.get_lines(self.this_tree, file_id)
1010
m3 = Merge3(base_lines, this_lines, other_lines,
1011
is_cherrypick=self.cherrypick)
1012
start_marker = "!START OF MERGE CONFLICT!" + "I HOPE THIS IS UNIQUE"
1013
if self.show_base is True:
1014
base_marker = '|' * 7
1018
def iter_merge3(retval):
1019
retval["text_conflicts"] = False
1020
for line in m3.merge_lines(name_a = "TREE",
1021
name_b = "MERGE-SOURCE",
1022
name_base = "BASE-REVISION",
1023
start_marker=start_marker,
1024
base_marker=base_marker,
1025
reprocess=self.reprocess):
1026
if line.startswith(start_marker):
1027
retval["text_conflicts"] = True
1028
yield line.replace(start_marker, '<' * 7)
1032
merge3_iterator = iter_merge3(retval)
1033
self.tt.create_file(merge3_iterator, trans_id)
1034
if retval["text_conflicts"] is True:
1035
self._raw_conflicts.append(('text conflict', trans_id))
1036
name = self.tt.final_name(trans_id)
1037
parent_id = self.tt.final_parent(trans_id)
1038
file_group = self._dump_conflicts(name, parent_id, file_id,
1039
this_lines, base_lines,
1041
file_group.append(trans_id)
1043
def _dump_conflicts(self, name, parent_id, file_id, this_lines=None,
1044
base_lines=None, other_lines=None, set_version=False,
1046
"""Emit conflict files.
1047
If this_lines, base_lines, or other_lines are omitted, they will be
1048
determined automatically. If set_version is true, the .OTHER, .THIS
1049
or .BASE (in that order) will be created as versioned files.
1051
data = [('OTHER', self.other_tree, other_lines),
1052
('THIS', self.this_tree, this_lines)]
1054
data.append(('BASE', self.base_tree, base_lines))
1057
for suffix, tree, lines in data:
1059
trans_id = self._conflict_file(name, parent_id, tree, file_id,
1061
file_group.append(trans_id)
1062
if set_version and not versioned:
1063
self.tt.version_file(file_id, trans_id)
1067
def _conflict_file(self, name, parent_id, tree, file_id, suffix,
1069
"""Emit a single conflict file."""
1070
name = name + '.' + suffix
1071
trans_id = self.tt.create_path(name, parent_id)
1072
entry = tree.inventory[file_id]
1073
create_by_entry(self.tt, entry, tree, trans_id, lines)
1076
def merge_executable(self, file_id, file_status):
1077
"""Perform a merge on the execute bit."""
1078
executable = [self.executable(t, file_id) for t in (self.base_tree,
1079
self.other_tree, self.this_tree)]
1080
self._merge_executable(file_id, executable, file_status)
1082
def _merge_executable(self, file_id, executable, file_status):
1083
"""Perform a merge on the execute bit."""
1084
base_executable, other_executable, this_executable = executable
1085
if file_status == "deleted":
1087
winner = self._three_way(*executable)
1088
if winner == "conflict":
1089
# There must be a None in here, if we have a conflict, but we
1090
# need executability since file status was not deleted.
1091
if self.executable(self.other_tree, file_id) is None:
1095
if winner == 'this' and file_status != "modified":
1097
trans_id = self.tt.trans_id_file_id(file_id)
1099
if self.tt.final_kind(trans_id) != "file":
1103
if winner == "this":
1104
executability = this_executable
1106
if file_id in self.other_tree:
1107
executability = other_executable
1108
elif file_id in self.this_tree:
1109
executability = this_executable
1110
elif file_id in self.base_tree:
1111
executability = base_executable
1112
if executability is not None:
1113
trans_id = self.tt.trans_id_file_id(file_id)
1114
self.tt.set_executability(executability, trans_id)
1116
def cook_conflicts(self, fs_conflicts):
1117
"""Convert all conflicts into a form that doesn't depend on trans_id"""
1118
from conflicts import Conflict
1120
self.cooked_conflicts.extend(cook_conflicts(fs_conflicts, self.tt))
1121
fp = FinalPaths(self.tt)
1122
for conflict in self._raw_conflicts:
1123
conflict_type = conflict[0]
1124
if conflict_type in ('name conflict', 'parent conflict'):
1125
trans_id = conflict[1]
1126
conflict_args = conflict[2:]
1127
if trans_id not in name_conflicts:
1128
name_conflicts[trans_id] = {}
1129
unique_add(name_conflicts[trans_id], conflict_type,
1131
if conflict_type == 'contents conflict':
1132
for trans_id in conflict[1]:
1133
file_id = self.tt.final_file_id(trans_id)
1134
if file_id is not None:
1136
path = fp.get_path(trans_id)
1137
for suffix in ('.BASE', '.THIS', '.OTHER'):
1138
if path.endswith(suffix):
1139
path = path[:-len(suffix)]
1141
c = Conflict.factory(conflict_type, path=path, file_id=file_id)
1142
self.cooked_conflicts.append(c)
1143
if conflict_type == 'text conflict':
1144
trans_id = conflict[1]
1145
path = fp.get_path(trans_id)
1146
file_id = self.tt.final_file_id(trans_id)
1147
c = Conflict.factory(conflict_type, path=path, file_id=file_id)
1148
self.cooked_conflicts.append(c)
1150
for trans_id, conflicts in name_conflicts.iteritems():
1152
this_parent, other_parent = conflicts['parent conflict']
1153
if this_parent == other_parent:
1154
raise AssertionError()
1156
this_parent = other_parent = \
1157
self.tt.final_file_id(self.tt.final_parent(trans_id))
1159
this_name, other_name = conflicts['name conflict']
1160
if this_name == other_name:
1161
raise AssertionError()
1163
this_name = other_name = self.tt.final_name(trans_id)
1164
other_path = fp.get_path(trans_id)
1165
if this_parent is not None and this_name is not None:
1166
this_parent_path = \
1167
fp.get_path(self.tt.trans_id_file_id(this_parent))
1168
this_path = pathjoin(this_parent_path, this_name)
1170
this_path = "<deleted>"
1171
file_id = self.tt.final_file_id(trans_id)
1172
c = Conflict.factory('path conflict', path=this_path,
1173
conflict_path=other_path, file_id=file_id)
1174
self.cooked_conflicts.append(c)
1175
self.cooked_conflicts.sort(key=Conflict.sort_key)
1178
class WeaveMerger(Merge3Merger):
    """Three-way tree merger, text weave merger."""
    supports_reprocess = True
    supports_show_base = False
    supports_reverse_cherrypick = False
    history_based = True

    def _merged_lines(self, file_id):
        """Generate the merged lines.

        There is no distinction between lines that are meant to contain <<<<<<<
        and conflicts.
        """
        if self.cherrypick:
            base = self.base_tree
        else:
            base = None
        plan = self.this_tree.plan_file_merge(file_id, self.other_tree,
                                              base=base)
        if 'merge' in debug.debug_flags:
            # Dump the merge plan to a .plan file for debugging; the plan
            # must be materialized because it is consumed twice.
            plan = list(plan)
            trans_id = self.tt.trans_id_file_id(file_id)
            name = self.tt.final_name(trans_id) + '.plan'
            contents = ('%10s|%s' % l for l in plan)
            self.tt.new_file(name, self.tt.final_parent(trans_id), contents)
        textmerge = PlanWeaveMerge(plan, '<<<<<<< TREE\n',
                                   '>>>>>>> MERGE-SOURCE\n')
        return textmerge.merge_lines(self.reprocess)

    def text_merge(self, file_id, trans_id):
        """Perform a (weave) text merge for a given file and file-id.

        If conflicts are encountered, .THIS and .OTHER files will be emitted,
        and a conflict will be noted.
        """
        lines, conflicts = self._merged_lines(file_id)
        lines = list(lines)
        # Note we're checking whether the OUTPUT is binary in this case,
        # because we don't want to get into weave merge guts.
        check_text_lines(lines)
        self.tt.create_file(lines, trans_id)
        if conflicts:
            self._raw_conflicts.append(('text conflict', trans_id))
            name = self.tt.final_name(trans_id)
            parent_id = self.tt.final_parent(trans_id)
            # A weave merge has no meaningful BASE text to dump.
            file_group = self._dump_conflicts(name, parent_id, file_id,
                                              no_base=True)
            file_group.append(trans_id)
1226
class LCAMerger(WeaveMerger):
    """Weave merger that plans merges against the LCAs rather than the weave."""

    def _merged_lines(self, file_id):
        """Generate the merged lines.

        There is no distinction between lines that are meant to contain <<<<<<<
        and conflicts.
        """
        if self.cherrypick:
            base = self.base_tree
        else:
            base = None
        plan = self.this_tree.plan_file_lca_merge(file_id, self.other_tree,
                                                  base=base)
        if 'merge' in debug.debug_flags:
            # Dump the merge plan to a .plan file for debugging; the plan
            # must be materialized because it is consumed twice.
            plan = list(plan)
            trans_id = self.tt.trans_id_file_id(file_id)
            name = self.tt.final_name(trans_id) + '.plan'
            contents = ('%10s|%s' % l for l in plan)
            self.tt.new_file(name, self.tt.final_parent(trans_id), contents)
        textmerge = PlanWeaveMerge(plan, '<<<<<<< TREE\n',
                                   '>>>>>>> MERGE-SOURCE\n')
        return textmerge.merge_lines(self.reprocess)
1250
class Diff3Merger(Merge3Merger):
    """Three-way merger using external diff3 for text merging"""

    def dump_file(self, temp_dir, name, tree, file_id):
        """Write the tree's copy of file_id into temp_dir/name; return its path."""
        out_path = pathjoin(temp_dir, name)
        out_file = open(out_path, "wb")
        try:
            in_file = tree.get_file(file_id)
            for line in in_file:
                out_file.write(line)
        finally:
            out_file.close()
        return out_path

    def text_merge(self, file_id, trans_id):
        """Perform a diff3 merge using a specified file-id and trans-id.

        If conflicts are encountered, .BASE, .THIS. and .OTHER conflict files
        will be dumped, and a will be conflict noted.
        """
        import bzrlib.patch
        temp_dir = osutils.mkdtemp(prefix="bzr-")
        try:
            new_file = pathjoin(temp_dir, "new")
            this = self.dump_file(temp_dir, "this", self.this_tree, file_id)
            base = self.dump_file(temp_dir, "base", self.base_tree, file_id)
            other = self.dump_file(temp_dir, "other", self.other_tree, file_id)
            status = bzrlib.patch.diff3(new_file, this, base, other)
            if status not in (0, 1):
                # diff3 returns 0 for clean merge, 1 for conflicts; anything
                # else is a hard failure.
                raise BzrError("Unhandled diff3 exit code")
            f = open(new_file, 'rb')
            try:
                self.tt.create_file(f, trans_id)
            finally:
                f.close()
            if status == 1:
                name = self.tt.final_name(trans_id)
                parent_id = self.tt.final_parent(trans_id)
                self._dump_conflicts(name, parent_id, file_id)
                self._raw_conflicts.append(('text conflict', trans_id))
        finally:
            # Always remove the scratch directory, even on error.
            osutils.rmtree(temp_dir)
1293
def merge_inner(this_branch, other_tree, base_tree, ignore_zero=False,
                backup_files=False,
                merge_type=Merge3Merger,
                interesting_ids=None,
                show_base=False,
                reprocess=False,
                other_rev_id=None,
                interesting_files=None,
                this_tree=None,
                pb=DummyProgress(),
                change_reporter=None):
    """Primary interface for merging.

    typical use is probably
    'merge_inner(branch, branch.get_revision_tree(other_revision),
                 branch.get_revision_tree(base_revision))'
    """
    if this_tree is None:
        raise BzrError("bzrlib.merge.merge_inner requires a this_tree "
                       "parameter as of bzrlib version 0.8.")
    merger = Merger(this_branch, other_tree, base_tree, this_tree=this_tree,
                    pb=pb, change_reporter=change_reporter)
    merger.backup_files = backup_files
    merger.merge_type = merge_type
    merger.interesting_ids = interesting_ids
    merger.ignore_zero = ignore_zero
    if interesting_files:
        if interesting_ids:
            # The two selection mechanisms are mutually exclusive.
            raise ValueError('Only supply interesting_ids'
                             ' or interesting_files')
        merger.interesting_files = interesting_files
    merger.show_base = show_base
    merger.reprocess = reprocess
    merger.other_rev_id = other_rev_id
    merger.other_basis = other_rev_id
    # Trees expose their revision either as get_revision_id() (revision
    # trees) or last_revision() (working trees).
    get_revision_id = getattr(base_tree, 'get_revision_id', None)
    if get_revision_id is None:
        get_revision_id = base_tree.last_revision
    merger.set_base_revision(get_revision_id(), this_branch)
    return merger.do_merge()
1334
def get_merge_type_registry():
    """Merge type registry is in bzrlib.option to avoid circular imports.

    This method provides a sanctioned way to retrieve it.
    """
    from bzrlib import option
    return option._merge_type_registry
1343
def _plan_annotate_merge(annotated_a, annotated_b, ancestors_a, ancestors_b):
    """Yield a merge plan from two annotated texts and their ancestries.

    :param annotated_a: [(revision, line)] for one side.
    :param annotated_b: [(revision, line)] for the other side.
    :param ancestors_a: Set of revisions in A's ancestry.
    :param ancestors_b: Set of revisions in B's ancestry.
    :return: An iterator of (state, line) pairs, where state is one of
        'unchanged', 'new-a', 'new-b', 'killed-a', 'killed-b'.
    """
    def status_a(revision, text):
        # A line only in A: if B's ancestry knows the revision, B deleted it.
        if revision in ancestors_b:
            return 'killed-b', text
        else:
            return 'new-a', text

    def status_b(revision, text):
        # A line only in B: if A's ancestry knows the revision, A deleted it.
        if revision in ancestors_a:
            return 'killed-a', text
        else:
            return 'new-b', text

    plain_a = [t for (a, t) in annotated_a]
    plain_b = [t for (a, t) in annotated_b]
    matcher = patiencediff.PatienceSequenceMatcher(None, plain_a, plain_b)
    blocks = matcher.get_matching_blocks()
    a_cur = 0
    b_cur = 0
    for ai, bi, l in blocks:
        # process all mismatched sections
        # (last mismatched section is handled because blocks always
        # includes a 0-length last block)
        for revision, text in annotated_a[a_cur:ai]:
            yield status_a(revision, text)
        for revision, text in annotated_b[b_cur:bi]:
            yield status_b(revision, text)
        # and now the matched section
        a_cur = ai + l
        b_cur = bi + l
        for text_a in plain_a[ai:a_cur]:
            yield "unchanged", text_a
1377
class _PlanMergeBase(object):
1379
def __init__(self, a_rev, b_rev, vf, key_prefix):
1382
:param a_rev: Revision-id of one revision to merge
1383
:param b_rev: Revision-id of the other revision to merge
1384
:param vf: A VersionedFiles containing both revisions
1385
:param key_prefix: A prefix for accessing keys in vf, typically
1391
self._last_lines = None
1392
self._last_lines_revision_id = None
1393
self._cached_matching_blocks = {}
1394
self._key_prefix = key_prefix
1395
self._precache_tip_lines()
1397
def _precache_tip_lines(self):
1398
lines = self.get_lines([self.a_rev, self.b_rev])
1399
self.lines_a = lines[self.a_rev]
1400
self.lines_b = lines[self.b_rev]
1402
def get_lines(self, revisions):
1403
"""Get lines for revisions from the backing VersionedFiles.
1405
:raises RevisionNotPresent: on absent texts.
1407
keys = [(self._key_prefix + (rev,)) for rev in revisions]
1409
for record in self.vf.get_record_stream(keys, 'unordered', True):
1410
if record.storage_kind == 'absent':
1411
raise errors.RevisionNotPresent(record.key, self.vf)
1412
result[record.key[-1]] = osutils.split_lines(
1413
record.get_bytes_as('fulltext'))
1416
def plan_merge(self):
1417
"""Generate a 'plan' for merging the two revisions.
1419
This involves comparing their texts and determining the cause of
1420
differences. If text A has a line and text B does not, then either the
1421
line was added to text A, or it was deleted from B. Once the causes
1422
are combined, they are written out in the format described in
1423
VersionedFile.plan_merge
1425
blocks = self._get_matching_blocks(self.a_rev, self.b_rev)
1426
unique_a, unique_b = self._unique_lines(blocks)
1427
new_a, killed_b = self._determine_status(self.a_rev, unique_a)
1428
new_b, killed_a = self._determine_status(self.b_rev, unique_b)
1429
return self._iter_plan(blocks, new_a, killed_b, new_b, killed_a)
1431
def _iter_plan(self, blocks, new_a, killed_b, new_b, killed_a):
1434
for i, j, n in blocks:
1435
for a_index in range(last_i, i):
1436
if a_index in new_a:
1437
if a_index in killed_b:
1438
yield 'conflicted-a', self.lines_a[a_index]
1440
yield 'new-a', self.lines_a[a_index]
1442
yield 'killed-b', self.lines_a[a_index]
1443
for b_index in range(last_j, j):
1444
if b_index in new_b:
1445
if b_index in killed_a:
1446
yield 'conflicted-b', self.lines_b[b_index]
1448
yield 'new-b', self.lines_b[b_index]
1450
yield 'killed-a', self.lines_b[b_index]
1451
# handle common lines
1452
for a_index in range(i, i+n):
1453
yield 'unchanged', self.lines_a[a_index]
1457
def _get_matching_blocks(self, left_revision, right_revision):
1458
"""Return a description of which sections of two revisions match.
1460
See SequenceMatcher.get_matching_blocks
1462
cached = self._cached_matching_blocks.get((left_revision,
1464
if cached is not None:
1466
if self._last_lines_revision_id == left_revision:
1467
left_lines = self._last_lines
1468
right_lines = self.get_lines([right_revision])[right_revision]
1470
lines = self.get_lines([left_revision, right_revision])
1471
left_lines = lines[left_revision]
1472
right_lines = lines[right_revision]
1473
self._last_lines = right_lines
1474
self._last_lines_revision_id = right_revision
1475
matcher = patiencediff.PatienceSequenceMatcher(None, left_lines,
1477
return matcher.get_matching_blocks()
1479
def _unique_lines(self, matching_blocks):
1480
"""Analyse matching_blocks to determine which lines are unique
1482
:return: a tuple of (unique_left, unique_right), where the values are
1483
sets of line numbers of unique lines.
1489
for i, j, n in matching_blocks:
1490
unique_left.extend(range(last_i, i))
1491
unique_right.extend(range(last_j, j))
1494
return unique_left, unique_right
1497
def _subtract_plans(old_plan, new_plan):
1498
"""Remove changes from new_plan that came from old_plan.
1500
It is assumed that the difference between the old_plan and new_plan
1501
is their choice of 'b' text.
1503
All lines from new_plan that differ from old_plan are emitted
1504
verbatim. All lines from new_plan that match old_plan but are
1505
not about the 'b' revision are emitted verbatim.
1507
Lines that match and are about the 'b' revision are the lines we
1508
don't want, so we convert 'killed-b' -> 'unchanged', and 'new-b'
1509
is skipped entirely.
1511
matcher = patiencediff.PatienceSequenceMatcher(None, old_plan,
1514
for i, j, n in matcher.get_matching_blocks():
1515
for jj in range(last_j, j):
1517
for jj in range(j, j+n):
1518
plan_line = new_plan[jj]
1519
if plan_line[0] == 'new-b':
1521
elif plan_line[0] == 'killed-b':
1522
yield 'unchanged', plan_line[1]
1528
class _PlanMerge(_PlanMergeBase):
    """Plan an annotate merge using on-the-fly annotation"""

    def __init__(self, a_rev, b_rev, vf, key_prefix):
        super(_PlanMerge, self).__init__(a_rev, b_rev, vf, key_prefix)
        self.a_key = self._key_prefix + (self.a_rev,)
        self.b_key = self._key_prefix + (self.b_rev,)
        self.graph = Graph(self.vf)
        heads = self.graph.heads((self.a_key, self.b_key))
        if len(heads) == 1:
            # one side dominates, so we can just return its values, yay for
            # simplicity :)
            # Ideally we would know that before we get this far
            self._head_key = heads.pop()
            if self._head_key == self.a_key:
                other = b_rev
            else:
                other = a_rev
            mutter('found dominating revision for %s\n%s > %s', self.vf,
                   self._head_key[-1], other)
            self._weave = None
        else:
            self._head_key = None
            self._build_weave()

    def _precache_tip_lines(self):
        # Turn this into a no-op, because we will do this later
        pass

    def _find_recursive_lcas(self):
        """Find all the ancestors back to a unique lca"""
        cur_ancestors = (self.a_key, self.b_key)
        # graph.find_lca(uncommon, keys) now returns plain NULL_REVISION,
        # rather than a key tuple. We will just map that directly to no common
        # ancestors.
        parent_map = {}
        while True:
            next_lcas = self.graph.find_lca(*cur_ancestors)
            # Map a plain NULL_REVISION to a simple no-ancestors
            if next_lcas == set([NULL_REVISION]):
                next_lcas = ()
            # Order the lca's based on when they were merged into the tip
            # While the actual merge portion of weave merge uses a set() of
            # active revisions, the order of insertion *does* effect the
            # implicit ordering of the texts.
            for rev_key in cur_ancestors:
                ordered_parents = tuple(self.graph.find_merge_order(rev_key,
                                                                    next_lcas))
                parent_map[rev_key] = ordered_parents
            if len(next_lcas) == 0:
                break
            elif len(next_lcas) == 1:
                parent_map[list(next_lcas)[0]] = ()
                break
            elif len(next_lcas) > 2:
                # More than 2 lca's, fall back to grabbing all nodes between
                # this and the unique lca.
                mutter('More than 2 LCAs, falling back to all nodes for:'
                       ' %s, %s\n=> %s', self.a_key, self.b_key, cur_ancestors)
                cur_lcas = next_lcas
                while len(cur_lcas) > 1:
                    cur_lcas = self.graph.find_lca(*cur_lcas)
                if len(cur_lcas) == 0:
                    # No common base to find, use the full ancestry
                    unique_lca = None
                else:
                    unique_lca = list(cur_lcas)[0]
                    if unique_lca == NULL_REVISION:
                        # find_lca will return a plain 'NULL_REVISION' rather
                        # than a key tuple when there is no common ancestor, we
                        # prefer to just use None, because it doesn't confuse
                        # _get_interesting_texts()
                        unique_lca = None
                parent_map.update(self._find_unique_parents(next_lcas,
                                                            unique_lca))
                break
            cur_ancestors = next_lcas
        return parent_map

    def _find_unique_parents(self, tip_keys, base_key):
        """Find ancestors of tip that aren't ancestors of base.

        :param tip_keys: Nodes that are interesting
        :param base_key: Cull all ancestors of this node
        :return: The parent map for all revisions between tip_keys and
            base_key. base_key will be included. References to nodes outside of
            the ancestor set will also be removed.
        """
        # TODO: this would be simpler if find_unique_ancestors took a list
        #       instead of a single tip, internally it supports it, but it
        #       isn't a "backwards compatible" api change.
        if base_key is None:
            parent_map = dict(self.graph.iter_ancestry(tip_keys))
            # We remove NULL_REVISION because it isn't a proper tuple key, and
            # thus confuses things like _get_interesting_texts, and our logic
            # to add the texts into the memory weave.
            if NULL_REVISION in parent_map:
                parent_map.pop(NULL_REVISION)
        else:
            interesting = set()
            for tip in tip_keys:
                interesting.update(
                    self.graph.find_unique_ancestors(tip, [base_key]))
            parent_map = self.graph.get_parent_map(interesting)
            parent_map[base_key] = ()
        culled_parent_map, child_map, tails = self._remove_external_references(
            parent_map)
        # Remove all the tails but base_key
        if base_key is not None:
            tails.remove(base_key)
            self._prune_tails(culled_parent_map, child_map, tails)
        # Now remove all the uninteresting 'linear' regions
        simple_map = _mod_graph.collapse_linear_regions(culled_parent_map)
        return simple_map

    @staticmethod
    def _remove_external_references(parent_map):
        """Remove references that go outside of the parent map.

        :param parent_map: Something returned from Graph.get_parent_map(keys)
        :return: (filtered_parent_map, child_map, tails)
            filtered_parent_map is parent_map without external references
            child_map is the {parent_key: [child_keys]} mapping
            tails is a list of nodes that do not have any parents in the map
        """
        # TODO: The basic effect of this function seems more generic than
        #       _PlanMerge. But the specific details of building a child_map,
        #       and computing tails seems very specific to _PlanMerge.
        #       Still, should this be in Graph land?
        filtered_parent_map = {}
        child_map = {}
        tails = []
        for key, parent_keys in parent_map.iteritems():
            culled_parent_keys = [p for p in parent_keys if p in parent_map]
            if not culled_parent_keys:
                tails.append(key)
            for parent_key in culled_parent_keys:
                child_map.setdefault(parent_key, []).append(key)
            # TODO: Do we want to do this, it adds overhead for every node,
            #       just to say that the node has no children
            child_map.setdefault(key, [])
            filtered_parent_map[key] = culled_parent_keys
        return filtered_parent_map, child_map, tails

    @staticmethod
    def _prune_tails(parent_map, child_map, tails_to_remove):
        """Remove tails from the parent map.

        This will remove the supplied revisions until no more children have 0
        parents.

        :param parent_map: A dict of {child: [parents]}, this dictionary will
            be modified in place.
        :param tails_to_remove: A list of tips that should be removed,
            this list will be consumed
        :param child_map: The reverse dict of parent_map ({parent: [children]})
            this dict will be modified
        :return: None, parent_map will be modified in place.
        """
        while tails_to_remove:
            next = tails_to_remove.pop()
            parent_map.pop(next)
            children = child_map.pop(next)
            for child in children:
                child_parents = parent_map[child]
                child_parents.remove(next)
                if len(child_parents) == 0:
                    tails_to_remove.append(child)

    def _get_interesting_texts(self, parent_map):
        """Return a dict of texts we are interested in.

        Note that the input is in key tuples, but the output is in plain
        revision ids.

        :param parent_map: The output from _find_recursive_lcas
        :return: A dict of {'revision_id':lines} as returned by
            _PlanMergeBase.get_lines()
        """
        all_revision_keys = set(parent_map)
        all_revision_keys.add(self.a_key)
        all_revision_keys.add(self.b_key)

        # Everything else is in 'keys' but get_lines is in 'revision_ids'
        all_texts = self.get_lines([k[-1] for k in all_revision_keys])
        return all_texts

    def _build_weave(self):
        from bzrlib import weave
        self._weave = weave.Weave(weave_name='in_memory_weave',
                                  allow_reserved=True)
        parent_map = self._find_recursive_lcas()

        all_texts = self._get_interesting_texts(parent_map)

        # Note: Unfortunately, the order given by topo_sort will effect the
        # ordering resolution in the output. Specifically, if you add A then B,
        # then in the output text A lines will show up before B lines. And, of
        # course, topo_sort doesn't guarantee any real ordering.
        # So we use merge_sort, and add a fake node on the tip.
        # This ensures that left-hand parents will always be inserted into the
        # weave before right-hand parents.
        tip_key = self._key_prefix + (_mod_revision.CURRENT_REVISION,)
        parent_map[tip_key] = (self.a_key, self.b_key)

        for seq_num, key, depth, eom in reversed(tsort.merge_sort(parent_map,
                                                                  tip_key)):
            if key == tip_key:
                # Skip the fake tip node; it has no text.
                continue
        # for key in tsort.topo_sort(parent_map):
            parent_keys = parent_map[key]
            revision_id = key[-1]
            parent_ids = [k[-1] for k in parent_keys]
            self._weave.add_lines(revision_id, parent_ids,
                                  all_texts[revision_id])

    def plan_merge(self):
        """Generate a 'plan' for merging the two revisions.

        This involves comparing their texts and determining the cause of
        differences.  If text A has a line and text B does not, then either the
        line was added to text A, or it was deleted from B.  Once the causes
        are combined, they are written out in the format described in
        VersionedFile.plan_merge
        """
        if self._head_key is not None: # There was a single head
            if self._head_key == self.a_key:
                plan = 'new-a'
            else:
                if self._head_key != self.b_key:
                    raise AssertionError('There was an invalid head: %s != %s'
                                         % (self.b_key, self._head_key))
                plan = 'new-b'
            head_rev = self._head_key[-1]
            lines = self.get_lines([head_rev])[head_rev]
            return ((plan, line) for line in lines)
        return self._weave.plan_merge(self.a_rev, self.b_rev)
1767
class _PlanLCAMerge(_PlanMergeBase):
1769
This merge algorithm differs from _PlanMerge in that:
1770
1. comparisons are done against LCAs only
1771
2. cases where a contested line is new versus one LCA but old versus
1772
another are marked as conflicts, by emitting the line as conflicted-a
1775
This is faster, and hopefully produces more useful output.
1778
def __init__(self, a_rev, b_rev, vf, key_prefix, graph):
1779
_PlanMergeBase.__init__(self, a_rev, b_rev, vf, key_prefix)
1780
lcas = graph.find_lca(key_prefix + (a_rev,), key_prefix + (b_rev,))
1783
if lca == NULL_REVISION:
1786
self.lcas.add(lca[-1])
1787
for lca in self.lcas:
1788
if _mod_revision.is_null(lca):
1791
lca_lines = self.get_lines([lca])[lca]
1792
matcher = patiencediff.PatienceSequenceMatcher(None, self.lines_a,
1794
blocks = list(matcher.get_matching_blocks())
1795
self._cached_matching_blocks[(a_rev, lca)] = blocks
1796
matcher = patiencediff.PatienceSequenceMatcher(None, self.lines_b,
1798
blocks = list(matcher.get_matching_blocks())
1799
self._cached_matching_blocks[(b_rev, lca)] = blocks
1801
def _determine_status(self, revision_id, unique_line_numbers):
1802
"""Determines the status unique lines versus all lcas.
1804
Basically, determines why the line is unique to this revision.
1806
A line may be determined new, killed, or both.
1808
If a line is determined new, that means it was not present in at least
1809
one LCA, and is not present in the other merge revision.
1811
If a line is determined killed, that means the line was present in
1814
If a line is killed and new, this indicates that the two merge
1815
revisions contain differing conflict resolutions.
1816
:param revision_id: The id of the revision in which the lines are
1818
:param unique_line_numbers: The line numbers of unique lines.
1819
:return a tuple of (new_this, killed_other):
1823
unique_line_numbers = set(unique_line_numbers)
1824
for lca in self.lcas:
1825
blocks = self._get_matching_blocks(revision_id, lca)
1826
unique_vs_lca, _ignored = self._unique_lines(blocks)
1827
new.update(unique_line_numbers.intersection(unique_vs_lca))
1828
killed.update(unique_line_numbers.difference(unique_vs_lca))