# Copyright (C) 2005, 2006, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
from itertools import chain
30
revision as _mod_revision,
33
from bzrlib.branch import Branch
34
from bzrlib.conflicts import ConflictList, Conflict
35
from bzrlib.errors import (BzrCommandError,
45
WorkingTreeNotRevision,
48
from bzrlib.graph import Graph
49
from bzrlib.merge3 import Merge3
50
from bzrlib.osutils import rename, pathjoin
51
from progress import DummyProgress, ProgressPhase
52
from bzrlib.revision import (NULL_REVISION, ensure_null)
53
from bzrlib.textfile import check_text_lines
54
from bzrlib.trace import mutter, warning, note, is_quiet
55
from bzrlib.transform import (TransformPreview, TreeTransform,
56
resolve_conflicts, cook_conflicts,
57
conflict_pass, FinalPaths, create_by_entry,
58
unique_add, ROOT_PARENT)
59
from bzrlib.versionedfile import PlanWeaveMerge
62
# TODO: Report back as changes are merged in
65
def transform_tree(from_tree, to_tree, interesting_ids=None):
    """Make from_tree's contents match to_tree's via merge_inner.

    from_tree is passed as both the working tree and the THIS tree, with
    ignore_zero=True so a zero-conflict result is not reported.

    :param from_tree: The working tree to transform in place.
    :param interesting_ids: If not None, restrict the operation to these
        file ids.
    """
    merge_inner(from_tree.branch, to_tree, from_tree, ignore_zero=True,
                interesting_ids=interesting_ids, this_tree=from_tree)
71
def __init__(self, this_branch, other_tree=None, base_tree=None,
72
this_tree=None, pb=DummyProgress(), change_reporter=None,
73
recurse='down', revision_graph=None):
75
self.this_branch = this_branch
76
self.this_basis = _mod_revision.ensure_null(
77
this_branch.last_revision())
78
self.this_rev_id = None
79
self.this_tree = this_tree
80
self.this_revision_tree = None
81
self.this_basis_tree = None
82
self.other_tree = other_tree
83
self.other_branch = None
84
self.base_tree = base_tree
85
self.ignore_zero = False
86
self.backup_files = False
87
self.interesting_ids = None
88
self.interesting_files = None
89
self.show_base = False
90
self.reprocess = False
93
self.recurse = recurse
94
self.change_reporter = change_reporter
95
self._cached_trees = {}
96
self._revision_graph = revision_graph
97
self._base_is_ancestor = None
98
self._base_is_other_ancestor = None
101
def revision_graph(self):
102
if self._revision_graph is None:
103
self._revision_graph = self.this_branch.repository.get_graph()
104
return self._revision_graph
106
def _set_base_is_ancestor(self, value):
107
self._base_is_ancestor = value
109
def _get_base_is_ancestor(self):
110
if self._base_is_ancestor is None:
111
self._base_is_ancestor = self.revision_graph.is_ancestor(
112
self.base_rev_id, self.this_basis)
113
return self._base_is_ancestor
115
base_is_ancestor = property(_get_base_is_ancestor, _set_base_is_ancestor)
117
def _set_base_is_other_ancestor(self, value):
118
self._base_is_other_ancestor = value
120
def _get_base_is_other_ancestor(self):
121
if self._base_is_other_ancestor is None:
122
if self.other_basis is None:
124
self._base_is_other_ancestor = self.revision_graph.is_ancestor(
125
self.base_rev_id, self.other_basis)
126
return self._base_is_other_ancestor
128
base_is_other_ancestor = property(_get_base_is_other_ancestor,
129
_set_base_is_other_ancestor)
132
def from_uncommitted(tree, other_tree, pb):
133
"""Return a Merger for uncommitted changes in other_tree.
135
:param tree: The tree to merge into
136
:param other_tree: The tree to get uncommitted changes from
137
:param pb: A progress indicator
139
merger = Merger(tree.branch, other_tree, other_tree.basis_tree(), tree,
141
merger.base_rev_id = merger.base_tree.get_revision_id()
142
merger.other_rev_id = None
143
merger.other_basis = merger.base_rev_id
147
def from_mergeable(klass, tree, mergeable, pb):
148
"""Return a Merger for a bundle or merge directive.
150
:param tree: The tree to merge changes into
151
:param mergeable: A merge directive or bundle
152
:param pb: A progress indicator
154
mergeable.install_revisions(tree.branch.repository)
155
base_revision_id, other_revision_id, verified =\
156
mergeable.get_merge_request(tree.branch.repository)
157
revision_graph = tree.branch.repository.get_graph()
158
if base_revision_id is not None:
159
if (base_revision_id != _mod_revision.NULL_REVISION and
160
revision_graph.is_ancestor(
161
base_revision_id, tree.branch.last_revision())):
162
base_revision_id = None
164
warning('Performing cherrypick')
165
merger = klass.from_revision_ids(pb, tree, other_revision_id,
166
base_revision_id, revision_graph=
168
return merger, verified
171
def from_revision_ids(pb, tree, other, base=None, other_branch=None,
172
base_branch=None, revision_graph=None):
173
"""Return a Merger for revision-ids.
175
:param tree: The tree to merge changes into
176
:param other: The revision-id to use as OTHER
177
:param base: The revision-id to use as BASE. If not specified, will
179
:param other_branch: A branch containing the other revision-id. If
180
not supplied, tree.branch is used.
181
:param base_branch: A branch containing the base revision-id. If
182
not supplied, other_branch or tree.branch will be used.
183
:param revision_graph: If you have a revision_graph precomputed, pass
184
it in, otherwise it will be created for you.
185
:param pb: A progress indicator
187
merger = Merger(tree.branch, this_tree=tree, pb=pb,
188
revision_graph=revision_graph)
189
if other_branch is None:
190
other_branch = tree.branch
191
merger.set_other_revision(other, other_branch)
195
if base_branch is None:
196
base_branch = other_branch
197
merger.set_base_revision(base, base_branch)
200
def revision_tree(self, revision_id, branch=None):
201
if revision_id not in self._cached_trees:
203
branch = self.this_branch
205
tree = self.this_tree.revision_tree(revision_id)
206
except errors.NoSuchRevisionInTree:
207
tree = branch.repository.revision_tree(revision_id)
208
self._cached_trees[revision_id] = tree
209
return self._cached_trees[revision_id]
211
def _get_tree(self, treespec, possible_transports=None):
212
from bzrlib import workingtree
213
location, revno = treespec
215
tree = workingtree.WorkingTree.open_containing(location)[0]
216
return tree.branch, tree
217
branch = Branch.open_containing(location, possible_transports)[0]
219
revision_id = branch.last_revision()
221
revision_id = branch.get_rev_id(revno)
222
revision_id = ensure_null(revision_id)
223
return branch, self.revision_tree(revision_id, branch)
225
def ensure_revision_trees(self):
226
if self.this_revision_tree is None:
227
self.this_basis_tree = self.revision_tree(self.this_basis)
228
if self.this_basis == self.this_rev_id:
229
self.this_revision_tree = self.this_basis_tree
231
if self.other_rev_id is None:
232
other_basis_tree = self.revision_tree(self.other_basis)
233
changes = other_basis_tree.changes_from(self.other_tree)
234
if changes.has_changed():
235
raise WorkingTreeNotRevision(self.this_tree)
236
other_rev_id = self.other_basis
237
self.other_tree = other_basis_tree
239
def file_revisions(self, file_id):
240
self.ensure_revision_trees()
241
def get_id(tree, file_id):
242
revision_id = tree.inventory[file_id].revision
244
if self.this_rev_id is None:
245
if self.this_basis_tree.get_file_sha1(file_id) != \
246
self.this_tree.get_file_sha1(file_id):
247
raise WorkingTreeNotRevision(self.this_tree)
249
trees = (self.this_basis_tree, self.other_tree)
250
return [get_id(tree, file_id) for tree in trees]
252
def check_basis(self, check_clean, require_commits=True):
253
if self.this_basis is None and require_commits is True:
254
raise BzrCommandError("This branch has no commits."
255
" (perhaps you would prefer 'bzr pull')")
258
if self.this_basis != self.this_rev_id:
259
raise errors.UncommittedChanges(self.this_tree)
261
def compare_basis(self):
263
basis_tree = self.revision_tree(self.this_tree.last_revision())
264
except errors.NoSuchRevision:
265
basis_tree = self.this_tree.basis_tree()
266
changes = self.this_tree.changes_from(basis_tree)
267
if not changes.has_changed():
268
self.this_rev_id = self.this_basis
270
def set_interesting_files(self, file_list):
271
self.interesting_files = file_list
273
def set_pending(self):
274
if not self.base_is_ancestor or not self.base_is_other_ancestor or self.other_rev_id is None:
278
def _add_parent(self):
279
new_parents = self.this_tree.get_parent_ids() + [self.other_rev_id]
280
new_parent_trees = []
281
for revision_id in new_parents:
283
tree = self.revision_tree(revision_id)
284
except errors.NoSuchRevision:
288
new_parent_trees.append((revision_id, tree))
290
self.this_tree.set_parent_trees(new_parent_trees,
291
allow_leftmost_as_ghost=True)
293
for _revision_id, tree in new_parent_trees:
297
def set_other(self, other_revision, possible_transports=None):
298
"""Set the revision and tree to merge from.
300
This sets the other_tree, other_rev_id, other_basis attributes.
302
:param other_revision: The [path, revision] list to merge from.
304
self.other_branch, self.other_tree = self._get_tree(other_revision,
306
if other_revision[1] == -1:
307
self.other_rev_id = _mod_revision.ensure_null(
308
self.other_branch.last_revision())
309
if _mod_revision.is_null(self.other_rev_id):
310
raise NoCommits(self.other_branch)
311
self.other_basis = self.other_rev_id
312
elif other_revision[1] is not None:
313
self.other_rev_id = self.other_branch.get_rev_id(other_revision[1])
314
self.other_basis = self.other_rev_id
316
self.other_rev_id = None
317
self.other_basis = self.other_branch.last_revision()
318
if self.other_basis is None:
319
raise NoCommits(self.other_branch)
320
if self.other_rev_id is not None:
321
self._cached_trees[self.other_rev_id] = self.other_tree
322
self._maybe_fetch(self.other_branch,self.this_branch, self.other_basis)
324
def set_other_revision(self, revision_id, other_branch):
325
"""Set 'other' based on a branch and revision id
327
:param revision_id: The revision to use for a tree
328
:param other_branch: The branch containing this tree
330
self.other_rev_id = revision_id
331
self.other_branch = other_branch
332
self._maybe_fetch(other_branch, self.this_branch, self.other_rev_id)
333
self.other_tree = self.revision_tree(revision_id)
334
self.other_basis = revision_id
336
def set_base_revision(self, revision_id, branch):
337
"""Set 'base' based on a branch and revision id
339
:param revision_id: The revision to use for a tree
340
:param branch: The branch containing this tree
342
self.base_rev_id = revision_id
343
self.base_branch = branch
344
self._maybe_fetch(branch, self.this_branch, revision_id)
345
self.base_tree = self.revision_tree(revision_id)
347
def _maybe_fetch(self, source, target, revision_id):
348
if not source.repository.has_same_location(target.repository):
349
target.fetch(source, revision_id)
352
revisions = [ensure_null(self.this_basis),
353
ensure_null(self.other_basis)]
354
if NULL_REVISION in revisions:
355
self.base_rev_id = NULL_REVISION
357
self.base_rev_id, steps = self.revision_graph.find_unique_lca(
358
revisions[0], revisions[1], count_steps=True)
359
if self.base_rev_id == NULL_REVISION:
360
raise UnrelatedBranches()
362
warning('Warning: criss-cross merge encountered. See bzr'
363
' help criss-cross.')
364
self.base_tree = self.revision_tree(self.base_rev_id)
365
self.base_is_ancestor = True
366
self.base_is_other_ancestor = True
368
def set_base(self, base_revision):
369
"""Set the base revision to use for the merge.
371
:param base_revision: A 2-list containing a path and revision number.
373
mutter("doing merge() with no base_revision specified")
374
if base_revision == [None, None]:
377
base_branch, self.base_tree = self._get_tree(base_revision)
378
if base_revision[1] == -1:
379
self.base_rev_id = base_branch.last_revision()
380
elif base_revision[1] is None:
381
self.base_rev_id = _mod_revision.NULL_REVISION
383
self.base_rev_id = _mod_revision.ensure_null(
384
base_branch.get_rev_id(base_revision[1]))
385
self._maybe_fetch(base_branch, self.this_branch, self.base_rev_id)
387
def make_merger(self):
388
kwargs = {'working_tree':self.this_tree, 'this_tree': self.this_tree,
389
'other_tree': self.other_tree,
390
'interesting_ids': self.interesting_ids,
391
'interesting_files': self.interesting_files,
394
if self.merge_type.requires_base:
395
kwargs['base_tree'] = self.base_tree
396
if self.merge_type.supports_reprocess:
397
kwargs['reprocess'] = self.reprocess
399
raise BzrError("Conflict reduction is not supported for merge"
400
" type %s." % self.merge_type)
401
if self.merge_type.supports_show_base:
402
kwargs['show_base'] = self.show_base
404
raise BzrError("Showing base is not supported for this"
405
" merge type. %s" % self.merge_type)
406
if (not getattr(self.merge_type, 'supports_reverse_cherrypick', True)
407
and not self.base_is_other_ancestor):
408
raise errors.CannotReverseCherrypick()
409
if self.merge_type.supports_cherrypick:
410
kwargs['cherrypick'] = (not self.base_is_ancestor or
411
not self.base_is_other_ancestor)
412
return self.merge_type(pb=self._pb,
413
change_reporter=self.change_reporter,
416
def _do_merge_to(self, merge):
418
if self.recurse == 'down':
419
for relpath, file_id in self.this_tree.iter_references():
420
sub_tree = self.this_tree.get_nested_tree(file_id, relpath)
421
other_revision = self.other_tree.get_reference_revision(
423
if other_revision == sub_tree.last_revision():
425
sub_merge = Merger(sub_tree.branch, this_tree=sub_tree)
426
sub_merge.merge_type = self.merge_type
427
other_branch = self.other_branch.reference_parent(file_id, relpath)
428
sub_merge.set_other_revision(other_revision, other_branch)
429
base_revision = self.base_tree.get_reference_revision(file_id)
430
sub_merge.base_tree = \
431
sub_tree.branch.repository.revision_tree(base_revision)
432
sub_merge.base_rev_id = base_revision
436
self.this_tree.lock_tree_write()
438
if self.base_tree is not None:
439
self.base_tree.lock_read()
441
if self.other_tree is not None:
442
self.other_tree.lock_read()
444
merge = self.make_merger()
445
self._do_merge_to(merge)
447
if self.other_tree is not None:
448
self.other_tree.unlock()
450
if self.base_tree is not None:
451
self.base_tree.unlock()
453
self.this_tree.unlock()
454
if len(merge.cooked_conflicts) == 0:
455
if not self.ignore_zero and not is_quiet():
456
note("All changes applied successfully.")
458
note("%d conflicts encountered." % len(merge.cooked_conflicts))
460
return len(merge.cooked_conflicts)
463
class Merge3Merger(object):
464
"""Three-way merger that uses the merge3 text merger"""
466
supports_reprocess = True
467
supports_show_base = True
468
history_based = False
469
supports_cherrypick = True
470
supports_reverse_cherrypick = True
471
winner_idx = {"this": 2, "other": 1, "conflict": 1}
473
def __init__(self, working_tree, this_tree, base_tree, other_tree,
474
interesting_ids=None, reprocess=False, show_base=False,
475
pb=DummyProgress(), pp=None, change_reporter=None,
476
interesting_files=None, do_merge=True,
478
"""Initialize the merger object and perform the merge.
480
:param working_tree: The working tree to apply the merge to
481
:param this_tree: The local tree in the merge operation
482
:param base_tree: The common tree in the merge operation
483
:param other_tree: The other other tree to merge changes from
484
:param interesting_ids: The file_ids of files that should be
485
participate in the merge. May not be combined with
487
:param: reprocess If True, perform conflict-reduction processing.
488
:param show_base: If True, show the base revision in text conflicts.
489
(incompatible with reprocess)
490
:param pb: A Progress bar
491
:param pp: A ProgressPhase object
492
:param change_reporter: An object that should report changes made
493
:param interesting_files: The tree-relative paths of files that should
494
participate in the merge. If these paths refer to directories,
495
the contents of those directories will also be included. May not
496
be combined with interesting_ids. If neither interesting_files nor
497
interesting_ids is specified, all files may participate in the
500
object.__init__(self)
501
if interesting_files is not None and interesting_ids is not None:
503
'specify either interesting_ids or interesting_files')
504
self.interesting_ids = interesting_ids
505
self.interesting_files = interesting_files
506
self.this_tree = working_tree
507
self.base_tree = base_tree
508
self.other_tree = other_tree
509
self._raw_conflicts = []
510
self.cooked_conflicts = []
511
self.reprocess = reprocess
512
self.show_base = show_base
515
self.change_reporter = change_reporter
516
self.cherrypick = cherrypick
518
self.pp = ProgressPhase("Merge phase", 3, self.pb)
523
self.this_tree.lock_tree_write()
524
self.base_tree.lock_read()
525
self.other_tree.lock_read()
526
self.tt = TreeTransform(self.this_tree, self.pb)
529
self._compute_transform()
531
results = self.tt.apply(no_conflicts=True)
532
self.write_modified(results)
534
self.this_tree.add_conflicts(self.cooked_conflicts)
535
except UnsupportedOperation:
539
self.other_tree.unlock()
540
self.base_tree.unlock()
541
self.this_tree.unlock()
544
def make_preview_transform(self):
545
self.base_tree.lock_read()
546
self.other_tree.lock_read()
547
self.tt = TransformPreview(self.this_tree)
550
self._compute_transform()
553
self.other_tree.unlock()
554
self.base_tree.unlock()
558
def _compute_transform(self):
559
entries = self._entries3()
560
child_pb = ui.ui_factory.nested_progress_bar()
562
for num, (file_id, changed, parents3, names3,
563
executable3) in enumerate(entries):
564
child_pb.update('Preparing file merge', num, len(entries))
565
self._merge_names(file_id, parents3, names3)
567
file_status = self.merge_contents(file_id)
569
file_status = 'unmodified'
570
self._merge_executable(file_id,
571
executable3, file_status)
576
child_pb = ui.ui_factory.nested_progress_bar()
578
fs_conflicts = resolve_conflicts(self.tt, child_pb,
579
lambda t, c: conflict_pass(t, c, self.other_tree))
582
if self.change_reporter is not None:
583
from bzrlib import delta
584
delta.report_changes(
585
self.tt.iter_changes(), self.change_reporter)
586
self.cook_conflicts(fs_conflicts)
587
for conflict in self.cooked_conflicts:
591
"""Gather data about files modified between three trees.
593
Return a list of tuples of file_id, changed, parents3, names3,
594
executable3. changed is a boolean indicating whether the file contents
595
or kind were changed. parents3 is a tuple of parent ids for base,
596
other and this. names3 is a tuple of names for base, other and this.
597
executable3 is a tuple of execute-bit values for base, other and this.
600
iterator = self.other_tree.iter_changes(self.base_tree,
601
include_unchanged=True, specific_files=self.interesting_files,
602
extra_trees=[self.this_tree])
603
for (file_id, paths, changed, versioned, parents, names, kind,
604
executable) in iterator:
605
if (self.interesting_ids is not None and
606
file_id not in self.interesting_ids):
608
if file_id in self.this_tree.inventory:
609
entry = self.this_tree.inventory[file_id]
610
this_name = entry.name
611
this_parent = entry.parent_id
612
this_executable = entry.executable
616
this_executable = None
617
parents3 = parents + (this_parent,)
618
names3 = names + (this_name,)
619
executable3 = executable + (this_executable,)
620
result.append((file_id, changed, parents3, names3, executable3))
625
self.tt.final_kind(self.tt.root)
627
self.tt.cancel_deletion(self.tt.root)
628
if self.tt.final_file_id(self.tt.root) is None:
629
self.tt.version_file(self.tt.tree_file_id(self.tt.root),
631
if self.other_tree.inventory.root is None:
633
other_root_file_id = self.other_tree.get_root_id()
634
other_root = self.tt.trans_id_file_id(other_root_file_id)
635
if other_root == self.tt.root:
638
self.tt.final_kind(other_root)
641
if self.other_tree.inventory.root.file_id in self.this_tree.inventory:
642
# the other tree's root is a non-root in the current tree
644
self.reparent_children(self.other_tree.inventory.root, self.tt.root)
645
self.tt.cancel_creation(other_root)
646
self.tt.cancel_versioning(other_root)
648
def reparent_children(self, ie, target):
649
for thing, child in ie.children.iteritems():
650
trans_id = self.tt.trans_id_file_id(child.file_id)
651
self.tt.adjust_path(self.tt.final_name(trans_id), target, trans_id)
653
def write_modified(self, results):
655
for path in results.modified_paths:
656
file_id = self.this_tree.path2id(self.this_tree.relpath(path))
659
hash = self.this_tree.get_file_sha1(file_id)
662
modified_hashes[file_id] = hash
663
self.this_tree.set_merge_modified(modified_hashes)
666
def parent(entry, file_id):
667
"""Determine the parent for a file_id (used as a key method)"""
670
return entry.parent_id
673
def name(entry, file_id):
674
"""Determine the name for a file_id (used as a key method)"""
680
def contents_sha1(tree, file_id):
681
"""Determine the sha1 of the file contents (used as a key method)."""
682
if file_id not in tree:
684
return tree.get_file_sha1(file_id)
687
def executable(tree, file_id):
688
"""Determine the executability of a file-id (used as a key method)."""
689
if file_id not in tree:
691
if tree.kind(file_id) != "file":
693
return tree.is_executable(file_id)
696
def kind(tree, file_id):
697
"""Determine the kind of a file-id (used as a key method)."""
698
if file_id not in tree:
700
return tree.kind(file_id)
703
def _three_way(base, other, this):
704
#if base == other, either they all agree, or only THIS has changed.
707
elif this not in (base, other):
709
# "Ambiguous clean merge" -- both sides have made the same change.
712
# this == base: only other has changed.
717
def scalar_three_way(this_tree, base_tree, other_tree, file_id, key):
718
"""Do a three-way test on a scalar.
719
Return "this", "other" or "conflict", depending whether a value wins.
721
key_base = key(base_tree, file_id)
722
key_other = key(other_tree, file_id)
723
#if base == other, either they all agree, or only THIS has changed.
724
if key_base == key_other:
726
key_this = key(this_tree, file_id)
727
# "Ambiguous clean merge"
728
if key_this == key_other:
730
elif key_this == key_base:
735
def merge_names(self, file_id):
737
if file_id in tree.inventory:
738
return tree.inventory[file_id]
741
this_entry = get_entry(self.this_tree)
742
other_entry = get_entry(self.other_tree)
743
base_entry = get_entry(self.base_tree)
744
entries = (base_entry, other_entry, this_entry)
747
for entry in entries:
752
names.append(entry.name)
753
parents.append(entry.parent_id)
754
return self._merge_names(file_id, parents, names)
756
def _merge_names(self, file_id, parents, names):
757
"""Perform a merge on file_id names and parents"""
758
base_name, other_name, this_name = names
759
base_parent, other_parent, this_parent = parents
761
name_winner = self._three_way(*names)
763
parent_id_winner = self._three_way(*parents)
764
if this_name is None:
765
if name_winner == "this":
766
name_winner = "other"
767
if parent_id_winner == "this":
768
parent_id_winner = "other"
769
if name_winner == "this" and parent_id_winner == "this":
771
if name_winner == "conflict":
772
trans_id = self.tt.trans_id_file_id(file_id)
773
self._raw_conflicts.append(('name conflict', trans_id,
774
this_name, other_name))
775
if parent_id_winner == "conflict":
776
trans_id = self.tt.trans_id_file_id(file_id)
777
self._raw_conflicts.append(('parent conflict', trans_id,
778
this_parent, other_parent))
779
if other_name is None:
780
# it doesn't matter whether the result was 'other' or
781
# 'conflict'-- if there's no 'other', we leave it alone.
783
# if we get here, name_winner and parent_winner are set to safe values.
784
trans_id = self.tt.trans_id_file_id(file_id)
785
parent_id = parents[self.winner_idx[parent_id_winner]]
786
if parent_id is not None:
787
parent_trans_id = self.tt.trans_id_file_id(parent_id)
788
self.tt.adjust_path(names[self.winner_idx[name_winner]],
789
parent_trans_id, trans_id)
791
def merge_contents(self, file_id):
792
"""Performa a merge on file_id contents."""
793
def contents_pair(tree):
794
if file_id not in tree:
796
kind = tree.kind(file_id)
798
contents = tree.get_file_sha1(file_id)
799
elif kind == "symlink":
800
contents = tree.get_symlink_target(file_id)
803
return kind, contents
805
def contents_conflict():
806
trans_id = self.tt.trans_id_file_id(file_id)
807
name = self.tt.final_name(trans_id)
808
parent_id = self.tt.final_parent(trans_id)
809
if file_id in self.this_tree.inventory:
810
self.tt.unversion_file(trans_id)
811
if file_id in self.this_tree:
812
self.tt.delete_contents(trans_id)
813
file_group = self._dump_conflicts(name, parent_id, file_id,
815
self._raw_conflicts.append(('contents conflict', file_group))
817
# See SPOT run. run, SPOT, run.
818
# So we're not QUITE repeating ourselves; we do tricky things with
820
base_pair = contents_pair(self.base_tree)
821
other_pair = contents_pair(self.other_tree)
822
if base_pair == other_pair:
823
# OTHER introduced no changes
825
this_pair = contents_pair(self.this_tree)
826
if this_pair == other_pair:
827
# THIS and OTHER introduced the same changes
830
trans_id = self.tt.trans_id_file_id(file_id)
831
if this_pair == base_pair:
832
# only OTHER introduced changes
833
if file_id in self.this_tree:
834
# Remove any existing contents
835
self.tt.delete_contents(trans_id)
836
if file_id in self.other_tree:
837
# OTHER changed the file
838
create_by_entry(self.tt,
839
self.other_tree.inventory[file_id],
840
self.other_tree, trans_id)
841
if file_id not in self.this_tree.inventory:
842
self.tt.version_file(file_id, trans_id)
844
elif file_id in self.this_tree.inventory:
845
# OTHER deleted the file
846
self.tt.unversion_file(trans_id)
848
#BOTH THIS and OTHER introduced changes; scalar conflict
849
elif this_pair[0] == "file" and other_pair[0] == "file":
850
# THIS and OTHER are both files, so text merge. Either
851
# BASE is a file, or both converted to files, so at least we
852
# have agreement that output should be a file.
854
self.text_merge(file_id, trans_id)
856
return contents_conflict()
857
if file_id not in self.this_tree.inventory:
858
self.tt.version_file(file_id, trans_id)
860
self.tt.tree_kind(trans_id)
861
self.tt.delete_contents(trans_id)
866
# Scalar conflict, can't text merge. Dump conflicts
867
return contents_conflict()
869
def get_lines(self, tree, file_id):
870
"""Return the lines in a file, or an empty list."""
872
return tree.get_file(file_id).readlines()
876
def text_merge(self, file_id, trans_id):
877
"""Perform a three-way text merge on a file_id"""
878
# it's possible that we got here with base as a different type.
879
# if so, we just want two-way text conflicts.
880
if file_id in self.base_tree and \
881
self.base_tree.kind(file_id) == "file":
882
base_lines = self.get_lines(self.base_tree, file_id)
885
other_lines = self.get_lines(self.other_tree, file_id)
886
this_lines = self.get_lines(self.this_tree, file_id)
887
m3 = Merge3(base_lines, this_lines, other_lines,
888
is_cherrypick=self.cherrypick)
889
start_marker = "!START OF MERGE CONFLICT!" + "I HOPE THIS IS UNIQUE"
890
if self.show_base is True:
891
base_marker = '|' * 7
895
def iter_merge3(retval):
896
retval["text_conflicts"] = False
897
for line in m3.merge_lines(name_a = "TREE",
898
name_b = "MERGE-SOURCE",
899
name_base = "BASE-REVISION",
900
start_marker=start_marker,
901
base_marker=base_marker,
902
reprocess=self.reprocess):
903
if line.startswith(start_marker):
904
retval["text_conflicts"] = True
905
yield line.replace(start_marker, '<' * 7)
909
merge3_iterator = iter_merge3(retval)
910
self.tt.create_file(merge3_iterator, trans_id)
911
if retval["text_conflicts"] is True:
912
self._raw_conflicts.append(('text conflict', trans_id))
913
name = self.tt.final_name(trans_id)
914
parent_id = self.tt.final_parent(trans_id)
915
file_group = self._dump_conflicts(name, parent_id, file_id,
916
this_lines, base_lines,
918
file_group.append(trans_id)
920
def _dump_conflicts(self, name, parent_id, file_id, this_lines=None,
921
base_lines=None, other_lines=None, set_version=False,
923
"""Emit conflict files.
924
If this_lines, base_lines, or other_lines are omitted, they will be
925
determined automatically. If set_version is true, the .OTHER, .THIS
926
or .BASE (in that order) will be created as versioned files.
928
data = [('OTHER', self.other_tree, other_lines),
929
('THIS', self.this_tree, this_lines)]
931
data.append(('BASE', self.base_tree, base_lines))
934
for suffix, tree, lines in data:
936
trans_id = self._conflict_file(name, parent_id, tree, file_id,
938
file_group.append(trans_id)
939
if set_version and not versioned:
940
self.tt.version_file(file_id, trans_id)
944
def _conflict_file(self, name, parent_id, tree, file_id, suffix,
946
"""Emit a single conflict file."""
947
name = name + '.' + suffix
948
trans_id = self.tt.create_path(name, parent_id)
949
entry = tree.inventory[file_id]
950
create_by_entry(self.tt, entry, tree, trans_id, lines)
953
def merge_executable(self, file_id, file_status):
954
"""Perform a merge on the execute bit."""
955
executable = [self.executable(t, file_id) for t in (self.base_tree,
956
self.other_tree, self.this_tree)]
957
self._merge_executable(file_id, executable, file_status)
959
def _merge_executable(self, file_id, executable, file_status):
960
"""Perform a merge on the execute bit."""
961
base_executable, other_executable, this_executable = executable
962
if file_status == "deleted":
964
winner = self._three_way(*executable)
965
if winner == "conflict":
966
# There must be a None in here, if we have a conflict, but we
967
# need executability since file status was not deleted.
968
if self.executable(self.other_tree, file_id) is None:
972
if winner == 'this' and file_status != "modified":
974
trans_id = self.tt.trans_id_file_id(file_id)
976
if self.tt.final_kind(trans_id) != "file":
981
executability = this_executable
983
if file_id in self.other_tree:
984
executability = other_executable
985
elif file_id in self.this_tree:
986
executability = this_executable
987
elif file_id in self.base_tree:
988
executability = base_executable
989
if executability is not None:
990
trans_id = self.tt.trans_id_file_id(file_id)
991
self.tt.set_executability(executability, trans_id)
993
def cook_conflicts(self, fs_conflicts):
994
"""Convert all conflicts into a form that doesn't depend on trans_id"""
995
from conflicts import Conflict
997
self.cooked_conflicts.extend(cook_conflicts(fs_conflicts, self.tt))
998
fp = FinalPaths(self.tt)
999
for conflict in self._raw_conflicts:
1000
conflict_type = conflict[0]
1001
if conflict_type in ('name conflict', 'parent conflict'):
1002
trans_id = conflict[1]
1003
conflict_args = conflict[2:]
1004
if trans_id not in name_conflicts:
1005
name_conflicts[trans_id] = {}
1006
unique_add(name_conflicts[trans_id], conflict_type,
1008
if conflict_type == 'contents conflict':
1009
for trans_id in conflict[1]:
1010
file_id = self.tt.final_file_id(trans_id)
1011
if file_id is not None:
1013
path = fp.get_path(trans_id)
1014
for suffix in ('.BASE', '.THIS', '.OTHER'):
1015
if path.endswith(suffix):
1016
path = path[:-len(suffix)]
1018
c = Conflict.factory(conflict_type, path=path, file_id=file_id)
1019
self.cooked_conflicts.append(c)
1020
if conflict_type == 'text conflict':
1021
trans_id = conflict[1]
1022
path = fp.get_path(trans_id)
1023
file_id = self.tt.final_file_id(trans_id)
1024
c = Conflict.factory(conflict_type, path=path, file_id=file_id)
1025
self.cooked_conflicts.append(c)
1027
for trans_id, conflicts in name_conflicts.iteritems():
1029
this_parent, other_parent = conflicts['parent conflict']
1030
if this_parent == other_parent:
1031
raise AssertionError()
1033
this_parent = other_parent = \
1034
self.tt.final_file_id(self.tt.final_parent(trans_id))
1036
this_name, other_name = conflicts['name conflict']
1037
if this_name == other_name:
1038
raise AssertionError()
1040
this_name = other_name = self.tt.final_name(trans_id)
1041
other_path = fp.get_path(trans_id)
1042
if this_parent is not None and this_name is not None:
1043
this_parent_path = \
1044
fp.get_path(self.tt.trans_id_file_id(this_parent))
1045
this_path = pathjoin(this_parent_path, this_name)
1047
this_path = "<deleted>"
1048
file_id = self.tt.final_file_id(trans_id)
1049
c = Conflict.factory('path conflict', path=this_path,
1050
conflict_path=other_path, file_id=file_id)
1051
self.cooked_conflicts.append(c)
1052
self.cooked_conflicts.sort(key=Conflict.sort_key)
1055
class WeaveMerger(Merge3Merger):
    """Three-way tree merger, text weave merger."""

    supports_reprocess = True
    supports_show_base = False
    supports_reverse_cherrypick = False
    history_based = True

    def _merged_lines(self, file_id):
        """Generate the merged lines.

        There is no distinction between lines that are meant to contain <<<<<<<
        and conflicts.

        :param file_id: The file to produce a merged text for.
        :return: Whatever PlanWeaveMerge.merge_lines returns for the plan
            (merged lines plus a conflict indicator).
        """
        if self.cherrypick:
            base = self.base_tree
        else:
            base = None
        plan = self.this_tree.plan_file_merge(file_id, self.other_tree,
                                              base=base)
        if 'merge' in debug.debug_flags:
            # Materialize the plan so it can both be dumped for debugging
            # and still be consumed by the merger below.
            plan = list(plan)
            trans_id = self.tt.trans_id_file_id(file_id)
            name = self.tt.final_name(trans_id) + '.plan'
            contents = ('%10s|%s' % l for l in plan)
            self.tt.new_file(name, self.tt.final_parent(trans_id), contents)
        textmerge = PlanWeaveMerge(plan, '<<<<<<< TREE\n',
                                   '>>>>>>> MERGE-SOURCE\n')
        return textmerge.merge_lines(self.reprocess)

    def text_merge(self, file_id, trans_id):
        """Perform a (weave) text merge for a given file and file-id.

        If conflicts are encountered, .THIS and .OTHER files will be emitted,
        and a conflict will be noted.
        """
        lines, conflicts = self._merged_lines(file_id)
        lines = list(lines)
        # Note we're checking whether the OUTPUT is binary in this case,
        # because we don't want to get into weave merge guts.
        check_text_lines(lines)
        self.tt.create_file(lines, trans_id)
        if conflicts:
            self._raw_conflicts.append(('text conflict', trans_id))
            name = self.tt.final_name(trans_id)
            parent_id = self.tt.final_parent(trans_id)
            # No .BASE file is emitted for weave merges, hence no_base.
            file_group = self._dump_conflicts(name, parent_id, file_id,
                                              no_base=True)
            file_group.append(trans_id)
class LCAMerger(WeaveMerger):
    """Weave-style merger that plans the merge against all LCAs."""

    def _merged_lines(self, file_id):
        """Generate the merged lines.

        There is no distinction between lines that are meant to contain <<<<<<<
        and conflicts.

        Identical to WeaveMerger._merged_lines except that the plan is built
        with plan_file_lca_merge (comparison against LCAs only).
        """
        if self.cherrypick:
            base = self.base_tree
        else:
            base = None
        plan = self.this_tree.plan_file_lca_merge(file_id, self.other_tree,
                                                  base=base)
        if 'merge' in debug.debug_flags:
            # Materialize the plan so it can both be dumped for debugging
            # and still be consumed by the merger below.
            plan = list(plan)
            trans_id = self.tt.trans_id_file_id(file_id)
            name = self.tt.final_name(trans_id) + '.plan'
            contents = ('%10s|%s' % l for l in plan)
            self.tt.new_file(name, self.tt.final_parent(trans_id), contents)
        textmerge = PlanWeaveMerge(plan, '<<<<<<< TREE\n',
                                   '>>>>>>> MERGE-SOURCE\n')
        return textmerge.merge_lines(self.reprocess)
class Diff3Merger(Merge3Merger):
    """Three-way merger using external diff3 for text merging"""

    def dump_file(self, temp_dir, name, tree, file_id):
        """Write the text of file_id from tree into temp_dir/name.

        :return: the path of the file that was written.
        """
        out_path = pathjoin(temp_dir, name)
        out_file = open(out_path, "wb")
        try:
            in_file = tree.get_file(file_id)
            for line in in_file:
                out_file.write(line)
        finally:
            out_file.close()
        return out_path

    def text_merge(self, file_id, trans_id):
        """Perform a diff3 merge using a specified file-id and trans-id.

        If conflicts are encountered, .BASE, .THIS. and .OTHER conflict files
        will be dumped, and a will be conflict noted.
        """
        # Imported here to avoid a hard dependency unless diff3 merging is
        # actually requested.
        import bzrlib.patch
        temp_dir = osutils.mkdtemp(prefix="bzr-")
        try:
            new_file = pathjoin(temp_dir, "new")
            this = self.dump_file(temp_dir, "this", self.this_tree, file_id)
            base = self.dump_file(temp_dir, "base", self.base_tree, file_id)
            other = self.dump_file(temp_dir, "other", self.other_tree, file_id)
            status = bzrlib.patch.diff3(new_file, this, base, other)
            # diff3 exits 0 for a clean merge, 1 for conflicts; anything else
            # is an error.
            if status not in (0, 1):
                raise BzrError("Unhandled diff3 exit code")
            f = open(new_file, 'rb')
            try:
                self.tt.create_file(f, trans_id)
            finally:
                f.close()
            if status == 1:
                name = self.tt.final_name(trans_id)
                parent_id = self.tt.final_parent(trans_id)
                self._dump_conflicts(name, parent_id, file_id)
                self._raw_conflicts.append(('text conflict', trans_id))
        finally:
            # Always clean up the scratch directory, even on error.
            osutils.rmtree(temp_dir)
def merge_inner(this_branch, other_tree, base_tree, ignore_zero=False,
                backup_files=False,
                merge_type=Merge3Merger,
                interesting_ids=None,
                show_base=False,
                reprocess=False,
                other_rev_id=None,
                interesting_files=None,
                this_tree=None,
                pb=DummyProgress(),
                change_reporter=None):
    """Primary interface for merging.

        typical use is probably
        'merge_inner(branch, branch.get_revision_tree(other_revision),
                     branch.get_revision_tree(base_revision))'
        """
    if this_tree is None:
        raise BzrError("bzrlib.merge.merge_inner requires a this_tree "
                       "parameter as of bzrlib version 0.8.")
    merger = Merger(this_branch, other_tree, base_tree, this_tree=this_tree,
                    pb=pb, change_reporter=change_reporter)
    merger.backup_files = backup_files
    merger.merge_type = merge_type
    merger.interesting_ids = interesting_ids
    merger.ignore_zero = ignore_zero
    if interesting_files:
        # interesting_ids and interesting_files are mutually exclusive ways
        # of limiting the merge.
        if interesting_ids:
            raise ValueError('Only supply interesting_ids'
                             ' or interesting_files')
        merger.interesting_files = interesting_files
    merger.show_base = show_base
    merger.reprocess = reprocess
    merger.other_rev_id = other_rev_id
    merger.other_basis = other_rev_id
    # Trees don't all expose the same way to get their revision id; prefer
    # get_revision_id, fall back to last_revision for working trees.
    get_revision_id = getattr(base_tree, 'get_revision_id', None)
    if get_revision_id is None:
        get_revision_id = base_tree.last_revision
    merger.set_base_revision(get_revision_id(), this_branch)
    return merger.do_merge()
def get_merge_type_registry():
    """Merge type registry is in bzrlib.option to avoid circular imports.

    This method provides a sanctioned way to retrieve it.
    """
    # Imported at call time: bzrlib.option imports this module.
    from bzrlib import option
    return option._merge_type_registry
def _plan_annotate_merge(annotated_a, annotated_b, ancestors_a, ancestors_b):
    """Yield a merge plan from two annotated texts.

    :param annotated_a: Iterable of (revision, text) for side A.
    :param annotated_b: Iterable of (revision, text) for side B.
    :param ancestors_a: Set of revisions in A's ancestry.
    :param ancestors_b: Set of revisions in B's ancestry.
    :return: An iterator of (state, text) tuples in the
        VersionedFile.plan_merge format.
    """
    def status_a(revision, text):
        # A line unique to A was either added in A, or killed in B.
        if revision in ancestors_b:
            return 'killed-b', text
        else:
            return 'new-a', text

    def status_b(revision, text):
        # A line unique to B was either added in B, or killed in A.
        if revision in ancestors_a:
            return 'killed-a', text
        else:
            return 'new-b', text

    plain_a = [t for (a, t) in annotated_a]
    plain_b = [t for (a, t) in annotated_b]
    matcher = patiencediff.PatienceSequenceMatcher(None, plain_a, plain_b)
    blocks = matcher.get_matching_blocks()
    a_cur = 0
    b_cur = 0
    for ai, bi, l in blocks:
        # process all mismatched sections
        # (last mismatched section is handled because blocks always
        # includes a 0-length last block)
        for revision, text in annotated_a[a_cur:ai]:
            yield status_a(revision, text)
        for revision, text in annotated_b[b_cur:bi]:
            yield status_b(revision, text)
        # and now the matched section
        a_cur = ai + l
        b_cur = bi + l
        for text_a in plain_a[ai:a_cur]:
            yield "unchanged", text_a
class _PlanMergeBase(object):
1256
def __init__(self, a_rev, b_rev, vf, key_prefix):
1259
:param a_rev: Revision-id of one revision to merge
1260
:param b_rev: Revision-id of the other revision to merge
1261
:param vf: A VersionedFiles containing both revisions
1262
:param key_prefix: A prefix for accessing keys in vf, typically
1268
self._last_lines = None
1269
self._last_lines_revision_id = None
1270
self._cached_matching_blocks = {}
1271
self._key_prefix = key_prefix
1272
self._precache_tip_lines()
1274
def _precache_tip_lines(self):
1275
lines = self.get_lines([self.a_rev, self.b_rev])
1276
self.lines_a = lines[self.a_rev]
1277
self.lines_b = lines[self.b_rev]
1279
def get_lines(self, revisions):
1280
"""Get lines for revisions from the backing VersionedFiles.
1282
:raises RevisionNotPresent: on absent texts.
1284
keys = [(self._key_prefix + (rev,)) for rev in revisions]
1286
for record in self.vf.get_record_stream(keys, 'unordered', True):
1287
if record.storage_kind == 'absent':
1288
raise errors.RevisionNotPresent(record.key, self.vf)
1289
result[record.key[-1]] = osutils.split_lines(
1290
record.get_bytes_as('fulltext'))
1293
def plan_merge(self):
1294
"""Generate a 'plan' for merging the two revisions.
1296
This involves comparing their texts and determining the cause of
1297
differences. If text A has a line and text B does not, then either the
1298
line was added to text A, or it was deleted from B. Once the causes
1299
are combined, they are written out in the format described in
1300
VersionedFile.plan_merge
1302
blocks = self._get_matching_blocks(self.a_rev, self.b_rev)
1303
unique_a, unique_b = self._unique_lines(blocks)
1304
new_a, killed_b = self._determine_status(self.a_rev, unique_a)
1305
new_b, killed_a = self._determine_status(self.b_rev, unique_b)
1306
return self._iter_plan(blocks, new_a, killed_b, new_b, killed_a)
1308
def _iter_plan(self, blocks, new_a, killed_b, new_b, killed_a):
1311
for i, j, n in blocks:
1312
for a_index in range(last_i, i):
1313
if a_index in new_a:
1314
if a_index in killed_b:
1315
yield 'conflicted-a', self.lines_a[a_index]
1317
yield 'new-a', self.lines_a[a_index]
1319
yield 'killed-b', self.lines_a[a_index]
1320
for b_index in range(last_j, j):
1321
if b_index in new_b:
1322
if b_index in killed_a:
1323
yield 'conflicted-b', self.lines_b[b_index]
1325
yield 'new-b', self.lines_b[b_index]
1327
yield 'killed-a', self.lines_b[b_index]
1328
# handle common lines
1329
for a_index in range(i, i+n):
1330
yield 'unchanged', self.lines_a[a_index]
1334
def _get_matching_blocks(self, left_revision, right_revision):
1335
"""Return a description of which sections of two revisions match.
1337
See SequenceMatcher.get_matching_blocks
1339
cached = self._cached_matching_blocks.get((left_revision,
1341
if cached is not None:
1343
if self._last_lines_revision_id == left_revision:
1344
left_lines = self._last_lines
1345
right_lines = self.get_lines([right_revision])[right_revision]
1347
lines = self.get_lines([left_revision, right_revision])
1348
left_lines = lines[left_revision]
1349
right_lines = lines[right_revision]
1350
self._last_lines = right_lines
1351
self._last_lines_revision_id = right_revision
1352
matcher = patiencediff.PatienceSequenceMatcher(None, left_lines,
1354
return matcher.get_matching_blocks()
1356
def _unique_lines(self, matching_blocks):
1357
"""Analyse matching_blocks to determine which lines are unique
1359
:return: a tuple of (unique_left, unique_right), where the values are
1360
sets of line numbers of unique lines.
1366
for i, j, n in matching_blocks:
1367
unique_left.extend(range(last_i, i))
1368
unique_right.extend(range(last_j, j))
1371
return unique_left, unique_right
1374
def _subtract_plans(old_plan, new_plan):
1375
"""Remove changes from new_plan that came from old_plan.
1377
It is assumed that the difference between the old_plan and new_plan
1378
is their choice of 'b' text.
1380
All lines from new_plan that differ from old_plan are emitted
1381
verbatim. All lines from new_plan that match old_plan but are
1382
not about the 'b' revision are emitted verbatim.
1384
Lines that match and are about the 'b' revision are the lines we
1385
don't want, so we convert 'killed-b' -> 'unchanged', and 'new-b'
1386
is skipped entirely.
1388
matcher = patiencediff.PatienceSequenceMatcher(None, old_plan,
1391
for i, j, n in matcher.get_matching_blocks():
1392
for jj in range(last_j, j):
1394
for jj in range(j, j+n):
1395
plan_line = new_plan[jj]
1396
if plan_line[0] == 'new-b':
1398
elif plan_line[0] == 'killed-b':
1399
yield 'unchanged', plan_line[1]
1405
class _PlanMerge(_PlanMergeBase):
    """Plan an annotate merge using on-the-fly annotation"""

    def __init__(self, a_rev, b_rev, vf, key_prefix):
        super(_PlanMerge, self).__init__(a_rev, b_rev, vf, key_prefix)
        self.a_key = self._key_prefix + (self.a_rev,)
        self.b_key = self._key_prefix + (self.b_rev,)
        self.graph = Graph(self.vf)
        heads = self.graph.heads((self.a_key, self.b_key))
        if len(heads) == 1:
            # one side dominates, so we can just return its values, yay for
            # simplicity.
            # Ideally we would know that before we get this far
            self._head_key = heads.pop()
            if self._head_key == self.a_key:
                other = b_rev
            else:
                other = a_rev
            mutter('found dominating revision for %s\n%s > %s', self.vf,
                   self._head_key[-1], other)
            self._weave = None
        else:
            self._head_key = None
            self._build_weave()

    def _precache_tip_lines(self):
        # Turn this into a no-op, because we will do this later
        pass

    def _find_recursive_lcas(self):
        """Find all the ancestors back to a unique lca"""
        cur_ancestors = (self.a_key, self.b_key)
        # graph.find_lca(uncommon, keys) now returns plain NULL_REVISION,
        # rather than a key tuple. We will just map that directly to no common
        # ancestors.
        parent_map = {}
        while True:
            next_lcas = self.graph.find_lca(*cur_ancestors)
            # Map a plain NULL_REVISION to a simple no-ancestors
            if next_lcas == set([NULL_REVISION]):
                next_lcas = ()
            # Order the lca's based on when they were merged into the tip
            # While the actual merge portion of weave merge uses a set() of
            # active revisions, the order of insertion *does* effect the
            # implicit ordering of the texts.
            for rev_key in cur_ancestors:
                ordered_parents = tuple(self.graph.find_merge_order(rev_key,
                                                                    next_lcas))
                parent_map[rev_key] = ordered_parents
            if len(next_lcas) == 0:
                break
            elif len(next_lcas) == 1:
                parent_map[list(next_lcas)[0]] = ()
                break
            elif len(next_lcas) > 2:
                # More than 2 lca's, fall back to grabbing all nodes between
                # this and the unique lca.
                mutter('More than 2 LCAs, falling back to all nodes for:'
                       ' %s, %s\n=> %s', self.a_key, self.b_key, cur_ancestors)
                cur_lcas = next_lcas
                while len(cur_lcas) > 1:
                    cur_lcas = self.graph.find_lca(*cur_lcas)
                if len(cur_lcas) == 0:
                    # No common base to find, use the full ancestry
                    unique_lca = None
                else:
                    unique_lca = list(cur_lcas)[0]
                    if unique_lca == NULL_REVISION:
                        # find_lca will return a plain 'NULL_REVISION' rather
                        # than a key tuple when there is no common ancestor, we
                        # prefer to just use None, because it doesn't confuse
                        # _get_interesting_texts()
                        unique_lca = None
                parent_map.update(self._find_unique_parents(next_lcas,
                                                            unique_lca))
                break
            cur_ancestors = next_lcas
        return parent_map

    def _find_unique_parents(self, tip_keys, base_key):
        """Find ancestors of tip that aren't ancestors of base.

        :param tip_keys: Nodes that are interesting
        :param base_key: Cull all ancestors of this node
        :return: The parent map for all revisions between tip_keys and
            base_key. base_key will be included. References to nodes outside of
            the ancestor set will also be removed.
        """
        # TODO: this would be simpler if find_unique_ancestors took a list
        #       instead of a single tip, internally it supports it, but it
        #       isn't a "backwards compatible" api change.
        if base_key is None:
            parent_map = dict(self.graph.iter_ancestry(tip_keys))
            # We remove NULL_REVISION because it isn't a proper tuple key, and
            # thus confuses things like _get_interesting_texts, and our logic
            # to add the texts into the memory weave.
            if NULL_REVISION in parent_map:
                parent_map.pop(NULL_REVISION)
        else:
            interesting = set()
            for tip in tip_keys:
                interesting.update(
                    self.graph.find_unique_ancestors(tip, [base_key]))
            parent_map = self.graph.get_parent_map(interesting)
            parent_map[base_key] = ()
        culled_parent_map, child_map, tails = self._remove_external_references(
            parent_map)
        # Remove all the tails but base_key
        if base_key is not None:
            tails.remove(base_key)
            self._prune_tails(culled_parent_map, child_map, tails)
        # Now remove all the uninteresting 'linear' regions
        simple_map = _mod_graph.collapse_linear_regions(culled_parent_map)
        return simple_map

    @staticmethod
    def _remove_external_references(parent_map):
        """Remove references that go outside of the parent map.

        :param parent_map: Something returned from Graph.get_parent_map(keys)
        :return: (filtered_parent_map, child_map, tails)
            filtered_parent_map is parent_map without external references
            child_map is the {parent_key: [child_keys]} mapping
            tails is a list of nodes that do not have any parents in the map
        """
        # TODO: The basic effect of this function seems more generic than
        #       _PlanMerge. But the specific details of building a child_map,
        #       and computing tails seems very specific to _PlanMerge.
        #       Still, should this be in Graph land?
        filtered_parent_map = {}
        child_map = {}
        tails = []
        for key, parent_keys in parent_map.iteritems():
            culled_parent_keys = [p for p in parent_keys if p in parent_map]
            if not culled_parent_keys:
                tails.append(key)
            for parent_key in culled_parent_keys:
                child_map.setdefault(parent_key, []).append(key)
            # TODO: Do we want to do this, it adds overhead for every node,
            #       just to say that the node has no children
            child_map.setdefault(key, [])
            filtered_parent_map[key] = culled_parent_keys
        return filtered_parent_map, child_map, tails

    @staticmethod
    def _prune_tails(parent_map, child_map, tails_to_remove):
        """Remove tails from the parent map.

        This will remove the supplied revisions until no more children have 0
        parents.

        :param parent_map: A dict of {child: [parents]}, this dictionary will
            be modified in place.
        :param tails_to_remove: A list of tips that should be removed,
            this list will be consumed
        :param child_map: The reverse dict of parent_map ({parent: [children]})
            this dict will be modified
        :return: None, parent_map will be modified in place.
        """
        while tails_to_remove:
            next = tails_to_remove.pop()
            parent_map.pop(next)
            children = child_map.pop(next)
            for child in children:
                child_parents = parent_map[child]
                child_parents.remove(next)
                if len(child_parents) == 0:
                    # Removing this node orphaned a child; prune it too.
                    tails_to_remove.append(child)

    def _get_interesting_texts(self, parent_map):
        """Return a dict of texts we are interested in.

        Note that the input is in key tuples, but the output is in plain
        revision ids.

        :param parent_map: The output from _find_recursive_lcas
        :return: A dict of {'revision_id':lines} as returned by
            _PlanMergeBase.get_lines()
        """
        all_revision_keys = set(parent_map)
        all_revision_keys.add(self.a_key)
        all_revision_keys.add(self.b_key)

        # Everything else is in 'keys' but get_lines is in 'revision_ids'
        all_texts = self.get_lines([k[-1] for k in all_revision_keys])
        return all_texts

    def _build_weave(self):
        from bzrlib import weave
        self._weave = weave.Weave(weave_name='in_memory_weave',
                                  allow_reserved=True)
        parent_map = self._find_recursive_lcas()

        all_texts = self._get_interesting_texts(parent_map)

        # Note: Unfortunately, the order given by topo_sort will effect the
        # ordering resolution in the output. Specifically, if you add A then B,
        # then in the output text A lines will show up before B lines. And, of
        # course, topo_sort doesn't guarantee any real ordering.
        # So we use merge_sort, and add a fake node on the tip.
        # This ensures that left-hand parents will always be inserted into the
        # weave before right-hand parents.
        tip_key = self._key_prefix + (_mod_revision.CURRENT_REVISION,)
        parent_map[tip_key] = (self.a_key, self.b_key)

        for seq_num, key, depth, eom in reversed(tsort.merge_sort(parent_map,
                                                                  tip_key)):
            if key == tip_key:
                # The fake tip node only exists to order the sort; skip it.
                continue
        # for key in tsort.topo_sort(parent_map):
            parent_keys = parent_map[key]
            revision_id = key[-1]
            parent_ids = [k[-1] for k in parent_keys]
            self._weave.add_lines(revision_id, parent_ids,
                                  all_texts[revision_id])

    def plan_merge(self):
        """Generate a 'plan' for merging the two revisions.

        This involves comparing their texts and determining the cause of
        differences.  If text A has a line and text B does not, then either the
        line was added to text A, or it was deleted from B.  Once the causes
        are combined, they are written out in the format described in
        VersionedFile.plan_merge
        """
        if self._head_key is not None: # There was a single head
            if self._head_key == self.a_key:
                plan = 'new-a'
            else:
                if self._head_key != self.b_key:
                    raise AssertionError('There was an invalid head: %s != %s'
                                         % (self.b_key, self._head_key))
                plan = 'new-b'
            head_rev = self._head_key[-1]
            lines = self.get_lines([head_rev])[head_rev]
            return ((plan, line) for line in lines)
        return self._weave.plan_merge(self.a_rev, self.b_rev)
class _PlanLCAMerge(_PlanMergeBase):
1646
This merge algorithm differs from _PlanMerge in that:
1647
1. comparisons are done against LCAs only
1648
2. cases where a contested line is new versus one LCA but old versus
1649
another are marked as conflicts, by emitting the line as conflicted-a
1652
This is faster, and hopefully produces more useful output.
1655
def __init__(self, a_rev, b_rev, vf, key_prefix, graph):
1656
_PlanMergeBase.__init__(self, a_rev, b_rev, vf, key_prefix)
1657
lcas = graph.find_lca(key_prefix + (a_rev,), key_prefix + (b_rev,))
1660
if lca == NULL_REVISION:
1663
self.lcas.add(lca[-1])
1664
for lca in self.lcas:
1665
if _mod_revision.is_null(lca):
1668
lca_lines = self.get_lines([lca])[lca]
1669
matcher = patiencediff.PatienceSequenceMatcher(None, self.lines_a,
1671
blocks = list(matcher.get_matching_blocks())
1672
self._cached_matching_blocks[(a_rev, lca)] = blocks
1673
matcher = patiencediff.PatienceSequenceMatcher(None, self.lines_b,
1675
blocks = list(matcher.get_matching_blocks())
1676
self._cached_matching_blocks[(b_rev, lca)] = blocks
1678
def _determine_status(self, revision_id, unique_line_numbers):
1679
"""Determines the status unique lines versus all lcas.
1681
Basically, determines why the line is unique to this revision.
1683
A line may be determined new, killed, or both.
1685
If a line is determined new, that means it was not present in at least
1686
one LCA, and is not present in the other merge revision.
1688
If a line is determined killed, that means the line was present in
1691
If a line is killed and new, this indicates that the two merge
1692
revisions contain differing conflict resolutions.
1693
:param revision_id: The id of the revision in which the lines are
1695
:param unique_line_numbers: The line numbers of unique lines.
1696
:return a tuple of (new_this, killed_other):
1700
unique_line_numbers = set(unique_line_numbers)
1701
for lca in self.lcas:
1702
blocks = self._get_matching_blocks(revision_id, lca)
1703
unique_vs_lca, _ignored = self._unique_lines(blocks)
1704
new.update(unique_line_numbers.intersection(unique_vs_lca))
1705
killed.update(unique_line_numbers.difference(unique_vs_lca))