# Copyright (C) 2005, 2006, 2008 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
from itertools import chain
30
revision as _mod_revision,
33
from bzrlib.branch import Branch
34
from bzrlib.conflicts import ConflictList, Conflict
35
from bzrlib.errors import (BzrCommandError,
45
WorkingTreeNotRevision,
48
from bzrlib.graph import Graph
49
from bzrlib.merge3 import Merge3
50
from bzrlib.osutils import rename, pathjoin
51
from progress import DummyProgress, ProgressPhase
52
from bzrlib.revision import (NULL_REVISION, ensure_null)
53
from bzrlib.textfile import check_text_lines
54
from bzrlib.trace import mutter, warning, note, is_quiet
55
from bzrlib.transform import (TransformPreview, TreeTransform,
56
resolve_conflicts, cook_conflicts,
57
conflict_pass, FinalPaths, create_by_entry,
58
unique_add, ROOT_PARENT)
59
from bzrlib.versionedfile import PlanWeaveMerge
62
# TODO: Report back as changes are merged in
65
def transform_tree(from_tree, to_tree, interesting_ids=None):
    """Reshape from_tree's contents using to_tree as the merge source.

    Delegates to merge_inner with to_tree as the OTHER tree and from_tree
    serving as both the BASE and the working (THIS) tree, so the
    differences between the two trees are applied to from_tree in place.

    NOTE(review): merge_inner's parameter order is not visible in this
    chunk; the role of each positional argument is assumed -- confirm.

    :param from_tree: the working tree that receives the changes
    :param to_tree: the tree whose content is merged in
    :param interesting_ids: optional set of file-ids restricting the merge
    """
    merge_inner(from_tree.branch, to_tree, from_tree,
                ignore_zero=True,
                interesting_ids=interesting_ids,
                this_tree=from_tree)
71
def __init__(self, this_branch, other_tree=None, base_tree=None,
72
this_tree=None, pb=DummyProgress(), change_reporter=None,
73
recurse='down', revision_graph=None):
75
self.this_branch = this_branch
76
self.this_basis = _mod_revision.ensure_null(
77
this_branch.last_revision())
78
self.this_rev_id = None
79
self.this_tree = this_tree
80
self.this_revision_tree = None
81
self.this_basis_tree = None
82
self.other_tree = other_tree
83
self.other_branch = None
84
self.base_tree = base_tree
85
self.ignore_zero = False
86
self.backup_files = False
87
self.interesting_ids = None
88
self.interesting_files = None
89
self.show_base = False
90
self.reprocess = False
93
self.recurse = recurse
94
self.change_reporter = change_reporter
95
self._cached_trees = {}
96
self._revision_graph = revision_graph
97
self._base_is_ancestor = None
98
self._base_is_other_ancestor = None
101
def revision_graph(self):
    """Return the repository revision graph, computing it lazily.

    The graph is obtained from this_branch's repository on first access
    and cached on the instance (_revision_graph) thereafter.
    """
    graph = self._revision_graph
    if graph is None:
        graph = self.this_branch.repository.get_graph()
        self._revision_graph = graph
    return graph
106
def _set_base_is_ancestor(self, value):
107
self._base_is_ancestor = value
109
def _get_base_is_ancestor(self):
110
if self._base_is_ancestor is None:
111
self._base_is_ancestor = self.revision_graph.is_ancestor(
112
self.base_rev_id, self.this_basis)
113
return self._base_is_ancestor
115
base_is_ancestor = property(_get_base_is_ancestor, _set_base_is_ancestor)
117
def _set_base_is_other_ancestor(self, value):
118
self._base_is_other_ancestor = value
120
def _get_base_is_other_ancestor(self):
121
if self._base_is_other_ancestor is None:
122
if self.other_basis is None:
124
self._base_is_other_ancestor = self.revision_graph.is_ancestor(
125
self.base_rev_id, self.other_basis)
126
return self._base_is_other_ancestor
128
base_is_other_ancestor = property(_get_base_is_other_ancestor,
129
_set_base_is_other_ancestor)
132
def from_uncommitted(tree, other_tree, pb):
133
"""Return a Merger for uncommitted changes in other_tree.
135
:param tree: The tree to merge into
136
:param other_tree: The tree to get uncommitted changes from
137
:param pb: A progress indicator
139
merger = Merger(tree.branch, other_tree, other_tree.basis_tree(), tree,
141
merger.base_rev_id = merger.base_tree.get_revision_id()
142
merger.other_rev_id = None
143
merger.other_basis = merger.base_rev_id
147
def from_mergeable(klass, tree, mergeable, pb):
148
"""Return a Merger for a bundle or merge directive.
150
:param tree: The tree to merge changes into
151
:param mergeable: A merge directive or bundle
152
:param pb: A progress indicator
154
mergeable.install_revisions(tree.branch.repository)
155
base_revision_id, other_revision_id, verified =\
156
mergeable.get_merge_request(tree.branch.repository)
157
revision_graph = tree.branch.repository.get_graph()
158
if base_revision_id is not None:
159
if (base_revision_id != _mod_revision.NULL_REVISION and
160
revision_graph.is_ancestor(
161
base_revision_id, tree.branch.last_revision())):
162
base_revision_id = None
164
warning('Performing cherrypick')
165
merger = klass.from_revision_ids(pb, tree, other_revision_id,
166
base_revision_id, revision_graph=
168
return merger, verified
171
def from_revision_ids(pb, tree, other, base=None, other_branch=None,
172
base_branch=None, revision_graph=None):
173
"""Return a Merger for revision-ids.
175
:param tree: The tree to merge changes into
176
:param other: The revision-id to use as OTHER
177
:param base: The revision-id to use as BASE. If not specified, will
179
:param other_branch: A branch containing the other revision-id. If
180
not supplied, tree.branch is used.
181
:param base_branch: A branch containing the base revision-id. If
182
not supplied, other_branch or tree.branch will be used.
183
:param revision_graph: If you have a revision_graph precomputed, pass
184
it in, otherwise it will be created for you.
185
:param pb: A progress indicator
187
merger = Merger(tree.branch, this_tree=tree, pb=pb,
188
revision_graph=revision_graph)
189
if other_branch is None:
190
other_branch = tree.branch
191
merger.set_other_revision(other, other_branch)
195
if base_branch is None:
196
base_branch = other_branch
197
merger.set_base_revision(base, base_branch)
200
def revision_tree(self, revision_id, branch=None):
201
if revision_id not in self._cached_trees:
203
branch = self.this_branch
205
tree = self.this_tree.revision_tree(revision_id)
206
except errors.NoSuchRevisionInTree:
207
tree = branch.repository.revision_tree(revision_id)
208
self._cached_trees[revision_id] = tree
209
return self._cached_trees[revision_id]
211
def _get_tree(self, treespec, possible_transports=None):
212
from bzrlib import workingtree
213
location, revno = treespec
215
tree = workingtree.WorkingTree.open_containing(location)[0]
216
return tree.branch, tree
217
branch = Branch.open_containing(location, possible_transports)[0]
219
revision_id = branch.last_revision()
221
revision_id = branch.get_rev_id(revno)
222
revision_id = ensure_null(revision_id)
223
return branch, self.revision_tree(revision_id, branch)
225
def ensure_revision_trees(self):
226
if self.this_revision_tree is None:
227
self.this_basis_tree = self.revision_tree(self.this_basis)
228
if self.this_basis == self.this_rev_id:
229
self.this_revision_tree = self.this_basis_tree
231
if self.other_rev_id is None:
232
other_basis_tree = self.revision_tree(self.other_basis)
233
changes = other_basis_tree.changes_from(self.other_tree)
234
if changes.has_changed():
235
raise WorkingTreeNotRevision(self.this_tree)
236
other_rev_id = self.other_basis
237
self.other_tree = other_basis_tree
239
def file_revisions(self, file_id):
240
self.ensure_revision_trees()
241
def get_id(tree, file_id):
242
revision_id = tree.inventory[file_id].revision
244
if self.this_rev_id is None:
245
if self.this_basis_tree.get_file_sha1(file_id) != \
246
self.this_tree.get_file_sha1(file_id):
247
raise WorkingTreeNotRevision(self.this_tree)
249
trees = (self.this_basis_tree, self.other_tree)
250
return [get_id(tree, file_id) for tree in trees]
252
def check_basis(self, check_clean, require_commits=True):
253
if self.this_basis is None and require_commits is True:
254
raise BzrCommandError("This branch has no commits."
255
" (perhaps you would prefer 'bzr pull')")
258
if self.this_basis != self.this_rev_id:
259
raise errors.UncommittedChanges(self.this_tree)
261
def compare_basis(self):
263
basis_tree = self.revision_tree(self.this_tree.last_revision())
264
except errors.NoSuchRevision:
265
basis_tree = self.this_tree.basis_tree()
266
changes = self.this_tree.changes_from(basis_tree)
267
if not changes.has_changed():
268
self.this_rev_id = self.this_basis
270
def set_interesting_files(self, file_list):
    """Restrict the merge to the given list of tree-relative paths."""
    self.interesting_files = file_list
273
def set_pending(self):
274
if not self.base_is_ancestor or not self.base_is_other_ancestor or self.other_rev_id is None:
278
def _add_parent(self):
279
new_parents = self.this_tree.get_parent_ids() + [self.other_rev_id]
280
new_parent_trees = []
281
for revision_id in new_parents:
283
tree = self.revision_tree(revision_id)
284
except errors.NoSuchRevision:
288
new_parent_trees.append((revision_id, tree))
290
self.this_tree.set_parent_trees(new_parent_trees,
291
allow_leftmost_as_ghost=True)
293
for _revision_id, tree in new_parent_trees:
297
def set_other(self, other_revision, possible_transports=None):
298
"""Set the revision and tree to merge from.
300
This sets the other_tree, other_rev_id, other_basis attributes.
302
:param other_revision: The [path, revision] list to merge from.
304
self.other_branch, self.other_tree = self._get_tree(other_revision,
306
if other_revision[1] == -1:
307
self.other_rev_id = _mod_revision.ensure_null(
308
self.other_branch.last_revision())
309
if _mod_revision.is_null(self.other_rev_id):
310
raise NoCommits(self.other_branch)
311
self.other_basis = self.other_rev_id
312
elif other_revision[1] is not None:
313
self.other_rev_id = self.other_branch.get_rev_id(other_revision[1])
314
self.other_basis = self.other_rev_id
316
self.other_rev_id = None
317
self.other_basis = self.other_branch.last_revision()
318
if self.other_basis is None:
319
raise NoCommits(self.other_branch)
320
if self.other_rev_id is not None:
321
self._cached_trees[self.other_rev_id] = self.other_tree
322
self._maybe_fetch(self.other_branch,self.this_branch, self.other_basis)
324
def set_other_revision(self, revision_id, other_branch):
325
"""Set 'other' based on a branch and revision id
327
:param revision_id: The revision to use for a tree
328
:param other_branch: The branch containing this tree
330
self.other_rev_id = revision_id
331
self.other_branch = other_branch
332
self._maybe_fetch(other_branch, self.this_branch, self.other_rev_id)
333
self.other_tree = self.revision_tree(revision_id)
334
self.other_basis = revision_id
336
def set_base_revision(self, revision_id, branch):
337
"""Set 'base' based on a branch and revision id
339
:param revision_id: The revision to use for a tree
340
:param branch: The branch containing this tree
342
self.base_rev_id = revision_id
343
self.base_branch = branch
344
self._maybe_fetch(branch, self.this_branch, revision_id)
345
self.base_tree = self.revision_tree(revision_id)
347
def _maybe_fetch(self, source, target, revision_id):
348
if not source.repository.has_same_location(target.repository):
349
target.fetch(source, revision_id)
352
revisions = [ensure_null(self.this_basis),
353
ensure_null(self.other_basis)]
354
if NULL_REVISION in revisions:
355
self.base_rev_id = NULL_REVISION
357
self.base_rev_id, steps = self.revision_graph.find_unique_lca(
358
revisions[0], revisions[1], count_steps=True)
359
if self.base_rev_id == NULL_REVISION:
360
raise UnrelatedBranches()
362
warning('Warning: criss-cross merge encountered. See bzr'
363
' help criss-cross.')
364
self.base_tree = self.revision_tree(self.base_rev_id)
365
self.base_is_ancestor = True
366
self.base_is_other_ancestor = True
368
def set_base(self, base_revision):
369
"""Set the base revision to use for the merge.
371
:param base_revision: A 2-list containing a path and revision number.
373
mutter("doing merge() with no base_revision specified")
374
if base_revision == [None, None]:
377
base_branch, self.base_tree = self._get_tree(base_revision)
378
if base_revision[1] == -1:
379
self.base_rev_id = base_branch.last_revision()
380
elif base_revision[1] is None:
381
self.base_rev_id = _mod_revision.NULL_REVISION
383
self.base_rev_id = _mod_revision.ensure_null(
384
base_branch.get_rev_id(base_revision[1]))
385
self._maybe_fetch(base_branch, self.this_branch, self.base_rev_id)
387
def make_merger(self):
388
kwargs = {'working_tree':self.this_tree, 'this_tree': self.this_tree,
389
'other_tree': self.other_tree,
390
'interesting_ids': self.interesting_ids,
391
'interesting_files': self.interesting_files,
394
if self.merge_type.requires_base:
395
kwargs['base_tree'] = self.base_tree
396
if self.merge_type.supports_reprocess:
397
kwargs['reprocess'] = self.reprocess
399
raise BzrError("Conflict reduction is not supported for merge"
400
" type %s." % self.merge_type)
401
if self.merge_type.supports_show_base:
402
kwargs['show_base'] = self.show_base
404
raise BzrError("Showing base is not supported for this"
405
" merge type. %s" % self.merge_type)
406
if (not getattr(self.merge_type, 'supports_reverse_cherrypick', True)
407
and not self.base_is_other_ancestor):
408
raise errors.CannotReverseCherrypick()
409
if self.merge_type.supports_cherrypick:
410
kwargs['cherrypick'] = (not self.base_is_ancestor or
411
not self.base_is_other_ancestor)
412
return self.merge_type(pb=self._pb,
413
change_reporter=self.change_reporter,
416
def _do_merge_to(self, merge):
418
if self.recurse == 'down':
419
for relpath, file_id in self.this_tree.iter_references():
420
sub_tree = self.this_tree.get_nested_tree(file_id, relpath)
421
other_revision = self.other_tree.get_reference_revision(
423
if other_revision == sub_tree.last_revision():
425
sub_merge = Merger(sub_tree.branch, this_tree=sub_tree)
426
sub_merge.merge_type = self.merge_type
427
other_branch = self.other_branch.reference_parent(file_id, relpath)
428
sub_merge.set_other_revision(other_revision, other_branch)
429
base_revision = self.base_tree.get_reference_revision(file_id)
430
sub_merge.base_tree = \
431
sub_tree.branch.repository.revision_tree(base_revision)
432
sub_merge.base_rev_id = base_revision
436
self.this_tree.lock_tree_write()
438
if self.base_tree is not None:
439
self.base_tree.lock_read()
441
if self.other_tree is not None:
442
self.other_tree.lock_read()
444
merge = self.make_merger()
445
self._do_merge_to(merge)
447
if self.other_tree is not None:
448
self.other_tree.unlock()
450
if self.base_tree is not None:
451
self.base_tree.unlock()
453
self.this_tree.unlock()
454
if len(merge.cooked_conflicts) == 0:
455
if not self.ignore_zero and not is_quiet():
456
note("All changes applied successfully.")
458
note("%d conflicts encountered." % len(merge.cooked_conflicts))
460
return len(merge.cooked_conflicts)
463
class Merge3Merger(object):
464
"""Three-way merger that uses the merge3 text merger"""
466
supports_reprocess = True
467
supports_show_base = True
468
history_based = False
469
supports_cherrypick = True
470
supports_reverse_cherrypick = True
471
winner_idx = {"this": 2, "other": 1, "conflict": 1}
473
def __init__(self, working_tree, this_tree, base_tree, other_tree,
474
interesting_ids=None, reprocess=False, show_base=False,
475
pb=DummyProgress(), pp=None, change_reporter=None,
476
interesting_files=None, do_merge=True,
478
"""Initialize the merger object and perform the merge.
480
:param working_tree: The working tree to apply the merge to
481
:param this_tree: The local tree in the merge operation
482
:param base_tree: The common tree in the merge operation
483
:param other_tree: The other other tree to merge changes from
484
:param interesting_ids: The file_ids of files that should be
485
participate in the merge. May not be combined with
487
:param: reprocess If True, perform conflict-reduction processing.
488
:param show_base: If True, show the base revision in text conflicts.
489
(incompatible with reprocess)
490
:param pb: A Progress bar
491
:param pp: A ProgressPhase object
492
:param change_reporter: An object that should report changes made
493
:param interesting_files: The tree-relative paths of files that should
494
participate in the merge. If these paths refer to directories,
495
the contents of those directories will also be included. May not
496
be combined with interesting_ids. If neither interesting_files nor
497
interesting_ids is specified, all files may participate in the
500
object.__init__(self)
501
if interesting_files is not None and interesting_ids is not None:
503
'specify either interesting_ids or interesting_files')
504
self.interesting_ids = interesting_ids
505
self.interesting_files = interesting_files
506
self.this_tree = working_tree
507
self.base_tree = base_tree
508
self.other_tree = other_tree
509
self._raw_conflicts = []
510
self.cooked_conflicts = []
511
self.reprocess = reprocess
512
self.show_base = show_base
515
self.change_reporter = change_reporter
516
self.cherrypick = cherrypick
518
self.pp = ProgressPhase("Merge phase", 3, self.pb)
523
self.this_tree.lock_tree_write()
524
self.base_tree.lock_read()
525
self.other_tree.lock_read()
526
self.tt = TreeTransform(self.this_tree, self.pb)
529
self._compute_transform()
531
results = self.tt.apply(no_conflicts=True)
532
self.write_modified(results)
534
self.this_tree.add_conflicts(self.cooked_conflicts)
535
except UnsupportedOperation:
539
self.other_tree.unlock()
540
self.base_tree.unlock()
541
self.this_tree.unlock()
544
def make_preview_transform(self):
545
self.base_tree.lock_read()
546
self.other_tree.lock_read()
547
self.tt = TransformPreview(self.this_tree)
550
self._compute_transform()
553
self.other_tree.unlock()
554
self.base_tree.unlock()
558
def _compute_transform(self):
559
entries = self._entries3()
560
child_pb = ui.ui_factory.nested_progress_bar()
562
for num, (file_id, changed, parents3, names3,
563
executable3) in enumerate(entries):
564
child_pb.update('Preparing file merge', num, len(entries))
565
self._merge_names(file_id, parents3, names3)
567
file_status = self.merge_contents(file_id)
569
file_status = 'unmodified'
570
self._merge_executable(file_id,
571
executable3, file_status)
576
child_pb = ui.ui_factory.nested_progress_bar()
578
fs_conflicts = resolve_conflicts(self.tt, child_pb,
579
lambda t, c: conflict_pass(t, c, self.other_tree))
582
if self.change_reporter is not None:
583
from bzrlib import delta
584
delta.report_changes(
585
self.tt.iter_changes(), self.change_reporter)
586
self.cook_conflicts(fs_conflicts)
587
for conflict in self.cooked_conflicts:
591
"""Gather data about files modified between three trees.
593
Return a list of tuples of file_id, changed, parents3, names3,
594
executable3. changed is a boolean indicating whether the file contents
595
or kind were changed. parents3 is a tuple of parent ids for base,
596
other and this. names3 is a tuple of names for base, other and this.
597
executable3 is a tuple of execute-bit values for base, other and this.
600
iterator = self.other_tree.iter_changes(self.base_tree,
601
include_unchanged=True, specific_files=self.interesting_files,
602
extra_trees=[self.this_tree])
603
for (file_id, paths, changed, versioned, parents, names, kind,
604
executable) in iterator:
605
if (self.interesting_ids is not None and
606
file_id not in self.interesting_ids):
608
if file_id in self.this_tree.inventory:
609
entry = self.this_tree.inventory[file_id]
610
this_name = entry.name
611
this_parent = entry.parent_id
612
this_executable = entry.executable
616
this_executable = None
617
parents3 = parents + (this_parent,)
618
names3 = names + (this_name,)
619
executable3 = executable + (this_executable,)
620
result.append((file_id, changed, parents3, names3, executable3))
625
self.tt.final_kind(self.tt.root)
627
self.tt.cancel_deletion(self.tt.root)
628
if self.tt.final_file_id(self.tt.root) is None:
629
self.tt.version_file(self.tt.tree_file_id(self.tt.root),
631
if self.other_tree.inventory.root is None:
633
other_root_file_id = self.other_tree.get_root_id()
634
other_root = self.tt.trans_id_file_id(other_root_file_id)
635
if other_root == self.tt.root:
638
self.tt.final_kind(other_root)
641
self.reparent_children(self.other_tree.inventory.root, self.tt.root)
642
self.tt.cancel_creation(other_root)
643
self.tt.cancel_versioning(other_root)
645
def reparent_children(self, ie, target):
    """Re-home every child of inventory entry ie under the target
    transform id, keeping each child's final name unchanged."""
    for _unused, child in ie.children.iteritems():
        child_trans_id = self.tt.trans_id_file_id(child.file_id)
        name = self.tt.final_name(child_trans_id)
        self.tt.adjust_path(name, target, child_trans_id)
650
def write_modified(self, results):
652
for path in results.modified_paths:
653
file_id = self.this_tree.path2id(self.this_tree.relpath(path))
656
hash = self.this_tree.get_file_sha1(file_id)
659
modified_hashes[file_id] = hash
660
self.this_tree.set_merge_modified(modified_hashes)
663
def parent(entry, file_id):
664
"""Determine the parent for a file_id (used as a key method)"""
667
return entry.parent_id
670
def name(entry, file_id):
671
"""Determine the name for a file_id (used as a key method)"""
677
def contents_sha1(tree, file_id):
678
"""Determine the sha1 of the file contents (used as a key method)."""
679
if file_id not in tree:
681
return tree.get_file_sha1(file_id)
684
def executable(tree, file_id):
685
"""Determine the executability of a file-id (used as a key method)."""
686
if file_id not in tree:
688
if tree.kind(file_id) != "file":
690
return tree.is_executable(file_id)
693
def kind(tree, file_id):
694
"""Determine the kind of a file-id (used as a key method)."""
695
if file_id not in tree:
697
return tree.kind(file_id)
700
def _three_way(base, other, this):
701
#if base == other, either they all agree, or only THIS has changed.
704
elif this not in (base, other):
706
# "Ambiguous clean merge" -- both sides have made the same change.
709
# this == base: only other has changed.
714
def scalar_three_way(this_tree, base_tree, other_tree, file_id, key):
715
"""Do a three-way test on a scalar.
716
Return "this", "other" or "conflict", depending whether a value wins.
718
key_base = key(base_tree, file_id)
719
key_other = key(other_tree, file_id)
720
#if base == other, either they all agree, or only THIS has changed.
721
if key_base == key_other:
723
key_this = key(this_tree, file_id)
724
# "Ambiguous clean merge"
725
if key_this == key_other:
727
elif key_this == key_base:
732
def merge_names(self, file_id):
734
if file_id in tree.inventory:
735
return tree.inventory[file_id]
738
this_entry = get_entry(self.this_tree)
739
other_entry = get_entry(self.other_tree)
740
base_entry = get_entry(self.base_tree)
741
entries = (base_entry, other_entry, this_entry)
744
for entry in entries:
749
names.append(entry.name)
750
parents.append(entry.parent_id)
751
return self._merge_names(file_id, parents, names)
753
def _merge_names(self, file_id, parents, names):
754
"""Perform a merge on file_id names and parents"""
755
base_name, other_name, this_name = names
756
base_parent, other_parent, this_parent = parents
758
name_winner = self._three_way(*names)
760
parent_id_winner = self._three_way(*parents)
761
if this_name is None:
762
if name_winner == "this":
763
name_winner = "other"
764
if parent_id_winner == "this":
765
parent_id_winner = "other"
766
if name_winner == "this" and parent_id_winner == "this":
768
if name_winner == "conflict":
769
trans_id = self.tt.trans_id_file_id(file_id)
770
self._raw_conflicts.append(('name conflict', trans_id,
771
this_name, other_name))
772
if parent_id_winner == "conflict":
773
trans_id = self.tt.trans_id_file_id(file_id)
774
self._raw_conflicts.append(('parent conflict', trans_id,
775
this_parent, other_parent))
776
if other_name is None:
777
# it doesn't matter whether the result was 'other' or
778
# 'conflict'-- if there's no 'other', we leave it alone.
780
# if we get here, name_winner and parent_winner are set to safe values.
781
trans_id = self.tt.trans_id_file_id(file_id)
782
parent_id = parents[self.winner_idx[parent_id_winner]]
783
if parent_id is not None:
784
parent_trans_id = self.tt.trans_id_file_id(parent_id)
785
self.tt.adjust_path(names[self.winner_idx[name_winner]],
786
parent_trans_id, trans_id)
788
def merge_contents(self, file_id):
789
"""Performa a merge on file_id contents."""
790
def contents_pair(tree):
791
if file_id not in tree:
793
kind = tree.kind(file_id)
795
contents = tree.get_file_sha1(file_id)
796
elif kind == "symlink":
797
contents = tree.get_symlink_target(file_id)
800
return kind, contents
802
def contents_conflict():
803
trans_id = self.tt.trans_id_file_id(file_id)
804
name = self.tt.final_name(trans_id)
805
parent_id = self.tt.final_parent(trans_id)
806
if file_id in self.this_tree.inventory:
807
self.tt.unversion_file(trans_id)
808
if file_id in self.this_tree:
809
self.tt.delete_contents(trans_id)
810
file_group = self._dump_conflicts(name, parent_id, file_id,
812
self._raw_conflicts.append(('contents conflict', file_group))
814
# See SPOT run. run, SPOT, run.
815
# So we're not QUITE repeating ourselves; we do tricky things with
817
base_pair = contents_pair(self.base_tree)
818
other_pair = contents_pair(self.other_tree)
819
if base_pair == other_pair:
820
# OTHER introduced no changes
822
this_pair = contents_pair(self.this_tree)
823
if this_pair == other_pair:
824
# THIS and OTHER introduced the same changes
827
trans_id = self.tt.trans_id_file_id(file_id)
828
if this_pair == base_pair:
829
# only OTHER introduced changes
830
if file_id in self.this_tree:
831
# Remove any existing contents
832
self.tt.delete_contents(trans_id)
833
if file_id in self.other_tree:
834
# OTHER changed the file
835
create_by_entry(self.tt,
836
self.other_tree.inventory[file_id],
837
self.other_tree, trans_id)
838
if file_id not in self.this_tree.inventory:
839
self.tt.version_file(file_id, trans_id)
841
elif file_id in self.this_tree.inventory:
842
# OTHER deleted the file
843
self.tt.unversion_file(trans_id)
845
#BOTH THIS and OTHER introduced changes; scalar conflict
846
elif this_pair[0] == "file" and other_pair[0] == "file":
847
# THIS and OTHER are both files, so text merge. Either
848
# BASE is a file, or both converted to files, so at least we
849
# have agreement that output should be a file.
851
self.text_merge(file_id, trans_id)
853
return contents_conflict()
854
if file_id not in self.this_tree.inventory:
855
self.tt.version_file(file_id, trans_id)
857
self.tt.tree_kind(trans_id)
858
self.tt.delete_contents(trans_id)
863
# Scalar conflict, can't text merge. Dump conflicts
864
return contents_conflict()
866
def get_lines(self, tree, file_id):
867
"""Return the lines in a file, or an empty list."""
869
return tree.get_file(file_id).readlines()
873
def text_merge(self, file_id, trans_id):
874
"""Perform a three-way text merge on a file_id"""
875
# it's possible that we got here with base as a different type.
876
# if so, we just want two-way text conflicts.
877
if file_id in self.base_tree and \
878
self.base_tree.kind(file_id) == "file":
879
base_lines = self.get_lines(self.base_tree, file_id)
882
other_lines = self.get_lines(self.other_tree, file_id)
883
this_lines = self.get_lines(self.this_tree, file_id)
884
m3 = Merge3(base_lines, this_lines, other_lines,
885
is_cherrypick=self.cherrypick)
886
start_marker = "!START OF MERGE CONFLICT!" + "I HOPE THIS IS UNIQUE"
887
if self.show_base is True:
888
base_marker = '|' * 7
892
def iter_merge3(retval):
893
retval["text_conflicts"] = False
894
for line in m3.merge_lines(name_a = "TREE",
895
name_b = "MERGE-SOURCE",
896
name_base = "BASE-REVISION",
897
start_marker=start_marker,
898
base_marker=base_marker,
899
reprocess=self.reprocess):
900
if line.startswith(start_marker):
901
retval["text_conflicts"] = True
902
yield line.replace(start_marker, '<' * 7)
906
merge3_iterator = iter_merge3(retval)
907
self.tt.create_file(merge3_iterator, trans_id)
908
if retval["text_conflicts"] is True:
909
self._raw_conflicts.append(('text conflict', trans_id))
910
name = self.tt.final_name(trans_id)
911
parent_id = self.tt.final_parent(trans_id)
912
file_group = self._dump_conflicts(name, parent_id, file_id,
913
this_lines, base_lines,
915
file_group.append(trans_id)
917
def _dump_conflicts(self, name, parent_id, file_id, this_lines=None,
918
base_lines=None, other_lines=None, set_version=False,
920
"""Emit conflict files.
921
If this_lines, base_lines, or other_lines are omitted, they will be
922
determined automatically. If set_version is true, the .OTHER, .THIS
923
or .BASE (in that order) will be created as versioned files.
925
data = [('OTHER', self.other_tree, other_lines),
926
('THIS', self.this_tree, this_lines)]
928
data.append(('BASE', self.base_tree, base_lines))
931
for suffix, tree, lines in data:
933
trans_id = self._conflict_file(name, parent_id, tree, file_id,
935
file_group.append(trans_id)
936
if set_version and not versioned:
937
self.tt.version_file(file_id, trans_id)
941
def _conflict_file(self, name, parent_id, tree, file_id, suffix,
943
"""Emit a single conflict file."""
944
name = name + '.' + suffix
945
trans_id = self.tt.create_path(name, parent_id)
946
entry = tree.inventory[file_id]
947
create_by_entry(self.tt, entry, tree, trans_id, lines)
950
def merge_executable(self, file_id, file_status):
    """Perform a merge on the execute bit.

    Samples the executability of file_id in base, other and this (in
    that order) and hands the triple to _merge_executable.
    """
    trees = (self.base_tree, self.other_tree, self.this_tree)
    executable = [self.executable(tree, file_id) for tree in trees]
    self._merge_executable(file_id, executable, file_status)
956
def _merge_executable(self, file_id, executable, file_status):
957
"""Perform a merge on the execute bit."""
958
base_executable, other_executable, this_executable = executable
959
if file_status == "deleted":
961
winner = self._three_way(*executable)
962
if winner == "conflict":
963
# There must be a None in here, if we have a conflict, but we
964
# need executability since file status was not deleted.
965
if self.executable(self.other_tree, file_id) is None:
969
if winner == 'this' and file_status != "modified":
971
trans_id = self.tt.trans_id_file_id(file_id)
973
if self.tt.final_kind(trans_id) != "file":
978
executability = this_executable
980
if file_id in self.other_tree:
981
executability = other_executable
982
elif file_id in self.this_tree:
983
executability = this_executable
984
elif file_id in self.base_tree:
985
executability = base_executable
986
if executability is not None:
987
trans_id = self.tt.trans_id_file_id(file_id)
988
self.tt.set_executability(executability, trans_id)
990
def cook_conflicts(self, fs_conflicts):
991
"""Convert all conflicts into a form that doesn't depend on trans_id"""
992
from conflicts import Conflict
994
self.cooked_conflicts.extend(cook_conflicts(fs_conflicts, self.tt))
995
fp = FinalPaths(self.tt)
996
for conflict in self._raw_conflicts:
997
conflict_type = conflict[0]
998
if conflict_type in ('name conflict', 'parent conflict'):
999
trans_id = conflict[1]
1000
conflict_args = conflict[2:]
1001
if trans_id not in name_conflicts:
1002
name_conflicts[trans_id] = {}
1003
unique_add(name_conflicts[trans_id], conflict_type,
1005
if conflict_type == 'contents conflict':
1006
for trans_id in conflict[1]:
1007
file_id = self.tt.final_file_id(trans_id)
1008
if file_id is not None:
1010
path = fp.get_path(trans_id)
1011
for suffix in ('.BASE', '.THIS', '.OTHER'):
1012
if path.endswith(suffix):
1013
path = path[:-len(suffix)]
1015
c = Conflict.factory(conflict_type, path=path, file_id=file_id)
1016
self.cooked_conflicts.append(c)
1017
if conflict_type == 'text conflict':
1018
trans_id = conflict[1]
1019
path = fp.get_path(trans_id)
1020
file_id = self.tt.final_file_id(trans_id)
1021
c = Conflict.factory(conflict_type, path=path, file_id=file_id)
1022
self.cooked_conflicts.append(c)
1024
for trans_id, conflicts in name_conflicts.iteritems():
1026
this_parent, other_parent = conflicts['parent conflict']
1027
if this_parent == other_parent:
1028
raise AssertionError()
1030
this_parent = other_parent = \
1031
self.tt.final_file_id(self.tt.final_parent(trans_id))
1033
this_name, other_name = conflicts['name conflict']
1034
if this_name == other_name:
1035
raise AssertionError()
1037
this_name = other_name = self.tt.final_name(trans_id)
1038
other_path = fp.get_path(trans_id)
1039
if this_parent is not None and this_name is not None:
1040
this_parent_path = \
1041
fp.get_path(self.tt.trans_id_file_id(this_parent))
1042
this_path = pathjoin(this_parent_path, this_name)
1044
this_path = "<deleted>"
1045
file_id = self.tt.final_file_id(trans_id)
1046
c = Conflict.factory('path conflict', path=this_path,
1047
conflict_path=other_path, file_id=file_id)
1048
self.cooked_conflicts.append(c)
1049
self.cooked_conflicts.sort(key=Conflict.sort_key)
1052
class WeaveMerger(Merge3Merger):
    """Three-way tree merger, text weave merger."""
    supports_reprocess = True
    supports_show_base = False
    supports_reverse_cherrypick = False
    history_based = True

    def _merged_lines(self, file_id):
        """Generate the merged lines.
        There is no distinction between lines that are meant to contain <<<<<<<
        and conflicts.
        """
        if self.cherrypick:
            base = self.base_tree
        else:
            base = None
        plan = self.this_tree.plan_file_merge(file_id, self.other_tree,
                                              base=base)
        if 'merge' in debug.debug_flags:
            plan = list(plan)
            trans_id = self.tt.trans_id_file_id(file_id)
            name = self.tt.final_name(trans_id) + '.plan'
            contents = ('%10s|%s' % l for l in plan)
            self.tt.new_file(name, self.tt.final_parent(trans_id), contents)
        textmerge = PlanWeaveMerge(plan, '<<<<<<< TREE\n',
                                   '>>>>>>> MERGE-SOURCE\n')
        return textmerge.merge_lines(self.reprocess)

    def text_merge(self, file_id, trans_id):
        """Perform a (weave) text merge for a given file and file-id.
        If conflicts are encountered, .THIS and .OTHER files will be emitted,
        and a conflict will be noted.
        """
        lines, conflicts = self._merged_lines(file_id)
        lines = list(lines)
        # Note we're checking whether the OUTPUT is binary in this case,
        # because we don't want to get into weave merge guts.
        check_text_lines(lines)
        self.tt.create_file(lines, trans_id)
        if conflicts:
            self._raw_conflicts.append(('text conflict', trans_id))
            name = self.tt.final_name(trans_id)
            parent_id = self.tt.final_parent(trans_id)
            file_group = self._dump_conflicts(name, parent_id, file_id,
                                              no_base=True)
            file_group.append(trans_id)
class LCAMerger(WeaveMerger):
    """Weave-style merger that plans the text merge against the LCAs."""

    def _merged_lines(self, file_id):
        """Generate the merged lines.
        There is no distinction between lines that are meant to contain <<<<<<<
        and conflicts.
        """
        if self.cherrypick:
            base = self.base_tree
        else:
            base = None
        plan = self.this_tree.plan_file_lca_merge(file_id, self.other_tree,
                                                  base=base)
        if 'merge' in debug.debug_flags:
            plan = list(plan)
            trans_id = self.tt.trans_id_file_id(file_id)
            name = self.tt.final_name(trans_id) + '.plan'
            contents = ('%10s|%s' % l for l in plan)
            self.tt.new_file(name, self.tt.final_parent(trans_id), contents)
        textmerge = PlanWeaveMerge(plan, '<<<<<<< TREE\n',
                                   '>>>>>>> MERGE-SOURCE\n')
        return textmerge.merge_lines(self.reprocess)
class Diff3Merger(Merge3Merger):
    """Three-way merger using external diff3 for text merging"""

    def dump_file(self, temp_dir, name, tree, file_id):
        """Write the tree's text for file_id into temp_dir/name.

        :return: the path of the file that was written.
        """
        out_path = pathjoin(temp_dir, name)
        out_file = open(out_path, "wb")
        try:
            in_file = tree.get_file(file_id)
            for line in in_file:
                out_file.write(line)
        finally:
            out_file.close()
        return out_path

    def text_merge(self, file_id, trans_id):
        """Perform a diff3 merge using a specified file-id and trans-id.
        If conflicts are encountered, .BASE, .THIS. and .OTHER conflict files
        will be dumped, and a conflict will be noted.
        """
        import bzrlib.patch
        temp_dir = osutils.mkdtemp(prefix="bzr-")
        try:
            new_file = pathjoin(temp_dir, "new")
            this = self.dump_file(temp_dir, "this", self.this_tree, file_id)
            base = self.dump_file(temp_dir, "base", self.base_tree, file_id)
            other = self.dump_file(temp_dir, "other", self.other_tree, file_id)
            status = bzrlib.patch.diff3(new_file, this, base, other)
            if status not in (0, 1):
                raise BzrError("Unhandled diff3 exit code")
            f = open(new_file, 'rb')
            try:
                self.tt.create_file(f, trans_id)
            finally:
                f.close()
            if status == 1:
                name = self.tt.final_name(trans_id)
                parent_id = self.tt.final_parent(trans_id)
                self._dump_conflicts(name, parent_id, file_id)
                self._raw_conflicts.append(('text conflict', trans_id))
        finally:
            osutils.rmtree(temp_dir)
def merge_inner(this_branch, other_tree, base_tree, ignore_zero=False,
                backup_files=False,
                merge_type=Merge3Merger,
                interesting_ids=None,
                show_base=False,
                reprocess=False,
                other_rev_id=None,
                interesting_files=None,
                this_tree=None,
                pb=DummyProgress(),
                change_reporter=None):
    """Primary interface for merging.

    typical use is probably
    'merge_inner(branch, branch.get_revision_tree(other_revision),
                 branch.get_revision_tree(base_revision))'
    """
    if this_tree is None:
        raise BzrError("bzrlib.merge.merge_inner requires a this_tree "
                       "parameter as of bzrlib version 0.8.")
    merger = Merger(this_branch, other_tree, base_tree, this_tree=this_tree,
                    pb=pb, change_reporter=change_reporter)
    merger.backup_files = backup_files
    merger.merge_type = merge_type
    merger.interesting_ids = interesting_ids
    merger.ignore_zero = ignore_zero
    if interesting_files:
        if interesting_ids:
            raise ValueError('Only supply interesting_ids'
                             ' or interesting_files')
        merger.interesting_files = interesting_files
    merger.show_base = show_base
    merger.reprocess = reprocess
    merger.other_rev_id = other_rev_id
    merger.other_basis = other_rev_id
    get_revision_id = getattr(base_tree, 'get_revision_id', None)
    if get_revision_id is None:
        get_revision_id = base_tree.last_revision
    merger.set_base_revision(get_revision_id(), this_branch)
    return merger.do_merge()
def get_merge_type_registry():
    """Merge type registry is in bzrlib.option to avoid circular imports.

    This method provides a sanctioned way to retrieve it.
    """
    from bzrlib import option
    return option._merge_type_registry
def _plan_annotate_merge(annotated_a, annotated_b, ancestors_a, ancestors_b):
    """Plan a merge from two annotated texts.

    Each annotated text is a list of (revision, text) pairs; ancestors_a and
    ancestors_b are the ancestries of the two tips.  Yields (state, text)
    tuples in VersionedFile.plan_merge format.
    """
    def status_a(revision, text):
        if revision in ancestors_b:
            return 'killed-b', text
        else:
            return 'new-a', text

    def status_b(revision, text):
        if revision in ancestors_a:
            return 'killed-a', text
        else:
            return 'new-b', text

    plain_a = [t for (a, t) in annotated_a]
    plain_b = [t for (a, t) in annotated_b]
    matcher = patiencediff.PatienceSequenceMatcher(None, plain_a, plain_b)
    blocks = matcher.get_matching_blocks()
    a_cur = 0
    b_cur = 0
    for ai, bi, l in blocks:
        # process all mismatched sections
        # (last mismatched section is handled because blocks always
        # includes a 0-length last block)
        for revision, text in annotated_a[a_cur:ai]:
            yield status_a(revision, text)
        for revision, text in annotated_b[b_cur:bi]:
            yield status_b(revision, text)
        # and now the matched section
        a_cur = ai + l
        b_cur = bi + l
        for text_a in plain_a[ai:a_cur]:
            yield "unchanged", text_a
class _PlanMergeBase(object):
1253
def __init__(self, a_rev, b_rev, vf, key_prefix):
1256
:param a_rev: Revision-id of one revision to merge
1257
:param b_rev: Revision-id of the other revision to merge
1258
:param vf: A VersionedFiles containing both revisions
1259
:param key_prefix: A prefix for accessing keys in vf, typically
1265
self._last_lines = None
1266
self._last_lines_revision_id = None
1267
self._cached_matching_blocks = {}
1268
self._key_prefix = key_prefix
1269
self._precache_tip_lines()
1271
def _precache_tip_lines(self):
1272
lines = self.get_lines([self.a_rev, self.b_rev])
1273
self.lines_a = lines[self.a_rev]
1274
self.lines_b = lines[self.b_rev]
1276
def get_lines(self, revisions):
1277
"""Get lines for revisions from the backing VersionedFiles.
1279
:raises RevisionNotPresent: on absent texts.
1281
keys = [(self._key_prefix + (rev,)) for rev in revisions]
1283
for record in self.vf.get_record_stream(keys, 'unordered', True):
1284
if record.storage_kind == 'absent':
1285
raise errors.RevisionNotPresent(record.key, self.vf)
1286
result[record.key[-1]] = osutils.split_lines(
1287
record.get_bytes_as('fulltext'))
1290
def plan_merge(self):
1291
"""Generate a 'plan' for merging the two revisions.
1293
This involves comparing their texts and determining the cause of
1294
differences. If text A has a line and text B does not, then either the
1295
line was added to text A, or it was deleted from B. Once the causes
1296
are combined, they are written out in the format described in
1297
VersionedFile.plan_merge
1299
blocks = self._get_matching_blocks(self.a_rev, self.b_rev)
1300
unique_a, unique_b = self._unique_lines(blocks)
1301
new_a, killed_b = self._determine_status(self.a_rev, unique_a)
1302
new_b, killed_a = self._determine_status(self.b_rev, unique_b)
1303
return self._iter_plan(blocks, new_a, killed_b, new_b, killed_a)
1305
def _iter_plan(self, blocks, new_a, killed_b, new_b, killed_a):
1308
for i, j, n in blocks:
1309
for a_index in range(last_i, i):
1310
if a_index in new_a:
1311
if a_index in killed_b:
1312
yield 'conflicted-a', self.lines_a[a_index]
1314
yield 'new-a', self.lines_a[a_index]
1316
yield 'killed-b', self.lines_a[a_index]
1317
for b_index in range(last_j, j):
1318
if b_index in new_b:
1319
if b_index in killed_a:
1320
yield 'conflicted-b', self.lines_b[b_index]
1322
yield 'new-b', self.lines_b[b_index]
1324
yield 'killed-a', self.lines_b[b_index]
1325
# handle common lines
1326
for a_index in range(i, i+n):
1327
yield 'unchanged', self.lines_a[a_index]
1331
def _get_matching_blocks(self, left_revision, right_revision):
1332
"""Return a description of which sections of two revisions match.
1334
See SequenceMatcher.get_matching_blocks
1336
cached = self._cached_matching_blocks.get((left_revision,
1338
if cached is not None:
1340
if self._last_lines_revision_id == left_revision:
1341
left_lines = self._last_lines
1342
right_lines = self.get_lines([right_revision])[right_revision]
1344
lines = self.get_lines([left_revision, right_revision])
1345
left_lines = lines[left_revision]
1346
right_lines = lines[right_revision]
1347
self._last_lines = right_lines
1348
self._last_lines_revision_id = right_revision
1349
matcher = patiencediff.PatienceSequenceMatcher(None, left_lines,
1351
return matcher.get_matching_blocks()
1353
def _unique_lines(self, matching_blocks):
1354
"""Analyse matching_blocks to determine which lines are unique
1356
:return: a tuple of (unique_left, unique_right), where the values are
1357
sets of line numbers of unique lines.
1363
for i, j, n in matching_blocks:
1364
unique_left.extend(range(last_i, i))
1365
unique_right.extend(range(last_j, j))
1368
return unique_left, unique_right
1371
def _subtract_plans(old_plan, new_plan):
1372
"""Remove changes from new_plan that came from old_plan.
1374
It is assumed that the difference between the old_plan and new_plan
1375
is their choice of 'b' text.
1377
All lines from new_plan that differ from old_plan are emitted
1378
verbatim. All lines from new_plan that match old_plan but are
1379
not about the 'b' revision are emitted verbatim.
1381
Lines that match and are about the 'b' revision are the lines we
1382
don't want, so we convert 'killed-b' -> 'unchanged', and 'new-b'
1383
is skipped entirely.
1385
matcher = patiencediff.PatienceSequenceMatcher(None, old_plan,
1388
for i, j, n in matcher.get_matching_blocks():
1389
for jj in range(last_j, j):
1391
for jj in range(j, j+n):
1392
plan_line = new_plan[jj]
1393
if plan_line[0] == 'new-b':
1395
elif plan_line[0] == 'killed-b':
1396
yield 'unchanged', plan_line[1]
1402
class _PlanMerge(_PlanMergeBase):
    """Plan an annotate merge using on-the-fly annotation"""

    def __init__(self, a_rev, b_rev, vf, key_prefix):
        super(_PlanMerge, self).__init__(a_rev, b_rev, vf, key_prefix)
        self.a_key = self._key_prefix + (self.a_rev,)
        self.b_key = self._key_prefix + (self.b_rev,)
        self.graph = Graph(self.vf)
        heads = self.graph.heads((self.a_key, self.b_key))
        if len(heads) == 1:
            # one side dominates, so we can just return its values, yay for
            # per-file graphs
            # Ideally we would know that before we get this far
            self._head_key = heads.pop()
            if self._head_key == self.a_key:
                other = b_rev
            else:
                other = a_rev
            mutter('found dominating revision for %s\n%s > %s', self.vf,
                   self._head_key[-1], other)
            self._weave = None
        else:
            self._head_key = None
            self._build_weave()

    def _precache_tip_lines(self):
        # Turn this into a no-op, because we will do this later
        pass

    def _find_recursive_lcas(self):
        """Find all the ancestors back to a unique lca"""
        cur_ancestors = (self.a_key, self.b_key)
        # graph.find_lca(uncommon, keys) now returns plain NULL_REVISION,
        # rather than a key tuple. We will just map that directly to no common
        # ancestors.
        parent_map = {}
        while True:
            next_lcas = self.graph.find_lca(*cur_ancestors)
            # Map a plain NULL_REVISION to a simple no-ancestors
            if next_lcas == set([NULL_REVISION]):
                next_lcas = ()
            # Order the lca's based on when they were merged into the tip
            # While the actual merge portion of weave merge uses a set() of
            # active revisions, the order of insertion *does* effect the
            # implicit ordering of the texts.
            for rev_key in cur_ancestors:
                ordered_parents = tuple(self.graph.find_merge_order(rev_key,
                                                                    next_lcas))
                parent_map[rev_key] = ordered_parents
            if len(next_lcas) == 0:
                break
            elif len(next_lcas) == 1:
                parent_map[list(next_lcas)[0]] = ()
                break
            elif len(next_lcas) > 2:
                # More than 2 lca's, fall back to grabbing all nodes between
                # this and the unique lca.
                mutter('More than 2 LCAs, falling back to all nodes for:'
                       ' %s, %s\n=> %s', self.a_key, self.b_key, cur_ancestors)
                cur_lcas = next_lcas
                while len(cur_lcas) > 1:
                    cur_lcas = self.graph.find_lca(*cur_lcas)
                if len(cur_lcas) == 0:
                    # No common base to find, use the full ancestry
                    unique_lca = None
                else:
                    unique_lca = list(cur_lcas)[0]
                    if unique_lca == NULL_REVISION:
                        # find_lca will return a plain 'NULL_REVISION' rather
                        # than a key tuple when there is no common ancestor, we
                        # prefer to just use None, because it doesn't confuse
                        # _get_interesting_texts()
                        unique_lca = None
                parent_map.update(self._find_unique_parents(next_lcas,
                                                            unique_lca))
                break
            cur_ancestors = next_lcas
        return parent_map

    def _find_unique_parents(self, tip_keys, base_key):
        """Find ancestors of tip that aren't ancestors of base.

        :param tip_keys: Nodes that are interesting
        :param base_key: Cull all ancestors of this node
        :return: The parent map for all revisions between tip_keys and
            base_key. base_key will be included. References to nodes outside of
            the ancestor set will also be removed.
        """
        # TODO: this would be simpler if find_unique_ancestors took a list
        #       instead of a single tip, internally it supports it, but it
        #       isn't a "backwards compatible" api change.
        if base_key is None:
            parent_map = dict(self.graph.iter_ancestry(tip_keys))
            # We remove NULL_REVISION because it isn't a proper tuple key, and
            # thus confuses things like _get_interesting_texts, and our logic
            # to add the texts into the memory weave.
            if NULL_REVISION in parent_map:
                parent_map.pop(NULL_REVISION)
        else:
            interesting = set()
            for tip in tip_keys:
                interesting.update(
                    self.graph.find_unique_ancestors(tip, [base_key]))
            parent_map = self.graph.get_parent_map(interesting)
            parent_map[base_key] = ()
        culled_parent_map, child_map, tails = self._remove_external_references(
            parent_map)
        # Remove all the tails but base_key
        if base_key is not None:
            tails.remove(base_key)
            self._prune_tails(culled_parent_map, child_map, tails)
        # Now remove all the uninteresting 'linear' regions
        simple_map = _mod_graph.collapse_linear_regions(culled_parent_map)
        return simple_map

    @staticmethod
    def _remove_external_references(parent_map):
        """Remove references that go outside of the parent map.

        :param parent_map: Something returned from Graph.get_parent_map(keys)
        :return: (filtered_parent_map, child_map, tails)
            filtered_parent_map is parent_map without external references
            child_map is the {parent_key: [child_keys]} mapping
            tails is a list of nodes that do not have any parents in the map
        """
        # TODO: The basic effect of this function seems more generic than
        #       _PlanMerge. But the specific details of building a child_map,
        #       and computing tails seems very specific to _PlanMerge.
        #       Still, should this be in Graph land?
        filtered_parent_map = {}
        child_map = {}
        tails = []
        for key, parent_keys in parent_map.iteritems():
            culled_parent_keys = [p for p in parent_keys if p in parent_map]
            if not culled_parent_keys:
                tails.append(key)
            for parent_key in culled_parent_keys:
                child_map.setdefault(parent_key, []).append(key)
            # TODO: Do we want to do this, it adds overhead for every node,
            #       just to say that the node has no children
            child_map.setdefault(key, [])
            filtered_parent_map[key] = culled_parent_keys
        return filtered_parent_map, child_map, tails

    @staticmethod
    def _prune_tails(parent_map, child_map, tails_to_remove):
        """Remove tails from the parent map.

        This will remove the supplied revisions until no more children have 0
        parents.

        :param parent_map: A dict of {child: [parents]}, this dictionary will
            be modified in place.
        :param tails_to_remove: A list of tips that should be removed,
            this list will be consumed
        :param child_map: The reverse dict of parent_map ({parent: [children]})
            this dict will be modified
        :return: None, parent_map will be modified in place.
        """
        while tails_to_remove:
            next = tails_to_remove.pop()
            parent_map.pop(next)
            children = child_map.pop(next)
            for child in children:
                child_parents = parent_map[child]
                child_parents.remove(next)
                if len(child_parents) == 0:
                    tails_to_remove.append(child)

    def _get_interesting_texts(self, parent_map):
        """Return a dict of texts we are interested in.

        Note that the input is in key tuples, but the output is in plain
        revision ids.

        :param parent_map: The output from _find_recursive_lcas
        :return: A dict of {'revision_id':lines} as returned by
            _PlanMergeBase.get_lines()
        """
        all_revision_keys = set(parent_map)
        all_revision_keys.add(self.a_key)
        all_revision_keys.add(self.b_key)

        # Everything else is in 'keys' but get_lines is in 'revision_ids'
        all_texts = self.get_lines([k[-1] for k in all_revision_keys])
        return all_texts

    def _build_weave(self):
        from bzrlib import weave
        self._weave = weave.Weave(weave_name='in_memory_weave',
                                  allow_reserved=True)
        parent_map = self._find_recursive_lcas()

        all_texts = self._get_interesting_texts(parent_map)

        # Note: Unfortunately, the order given by topo_sort will effect the
        # ordering resolution in the output. Specifically, if you add A then B,
        # then in the output text A lines will show up before B lines. And, of
        # course, topo_sort doesn't guarantee any real ordering.
        # So we use merge_sort, and add a fake node on the tip.
        # This ensures that left-hand parents will always be inserted into the
        # weave before right-hand parents.
        tip_key = self._key_prefix + (_mod_revision.CURRENT_REVISION,)
        parent_map[tip_key] = (self.a_key, self.b_key)

        for seq_num, key, depth, eom in reversed(tsort.merge_sort(parent_map,
                                                                  tip_key)):
            if key == tip_key:
                continue
        # for key in tsort.topo_sort(parent_map):
            parent_keys = parent_map[key]
            revision_id = key[-1]
            parent_ids = [k[-1] for k in parent_keys]
            self._weave.add_lines(revision_id, parent_ids,
                                  all_texts[revision_id])

    def plan_merge(self):
        """Generate a 'plan' for merging the two revisions.

        This involves comparing their texts and determining the cause of
        differences.  If text A has a line and text B does not, then either the
        line was added to text A, or it was deleted from B.  Once the causes
        are combined, they are written out in the format described in
        VersionedFile.plan_merge
        """
        if self._head_key is not None: # There was a single head
            if self._head_key == self.a_key:
                plan = 'new-a'
            else:
                if self._head_key != self.b_key:
                    raise AssertionError('There was an invalid head: %s != %s'
                                         % (self.b_key, self._head_key))
                plan = 'new-b'
            head_rev = self._head_key[-1]
            lines = self.get_lines([head_rev])[head_rev]
            return ((plan, line) for line in lines)
        return self._weave.plan_merge(self.a_rev, self.b_rev)
class _PlanLCAMerge(_PlanMergeBase):
1643
This merge algorithm differs from _PlanMerge in that:
1644
1. comparisons are done against LCAs only
1645
2. cases where a contested line is new versus one LCA but old versus
1646
another are marked as conflicts, by emitting the line as conflicted-a
1649
This is faster, and hopefully produces more useful output.
1652
def __init__(self, a_rev, b_rev, vf, key_prefix, graph):
1653
_PlanMergeBase.__init__(self, a_rev, b_rev, vf, key_prefix)
1654
lcas = graph.find_lca(key_prefix + (a_rev,), key_prefix + (b_rev,))
1657
if lca == NULL_REVISION:
1660
self.lcas.add(lca[-1])
1661
for lca in self.lcas:
1662
if _mod_revision.is_null(lca):
1665
lca_lines = self.get_lines([lca])[lca]
1666
matcher = patiencediff.PatienceSequenceMatcher(None, self.lines_a,
1668
blocks = list(matcher.get_matching_blocks())
1669
self._cached_matching_blocks[(a_rev, lca)] = blocks
1670
matcher = patiencediff.PatienceSequenceMatcher(None, self.lines_b,
1672
blocks = list(matcher.get_matching_blocks())
1673
self._cached_matching_blocks[(b_rev, lca)] = blocks
1675
def _determine_status(self, revision_id, unique_line_numbers):
1676
"""Determines the status unique lines versus all lcas.
1678
Basically, determines why the line is unique to this revision.
1680
A line may be determined new, killed, or both.
1682
If a line is determined new, that means it was not present in at least
1683
one LCA, and is not present in the other merge revision.
1685
If a line is determined killed, that means the line was present in
1688
If a line is killed and new, this indicates that the two merge
1689
revisions contain differing conflict resolutions.
1690
:param revision_id: The id of the revision in which the lines are
1692
:param unique_line_numbers: The line numbers of unique lines.
1693
:return a tuple of (new_this, killed_other):
1697
unique_line_numbers = set(unique_line_numbers)
1698
for lca in self.lcas:
1699
blocks = self._get_matching_blocks(revision_id, lca)
1700
unique_vs_lca, _ignored = self._unique_lines(blocks)
1701
new.update(unique_line_numbers.intersection(unique_vs_lca))
1702
killed.update(unique_line_numbers.difference(unique_vs_lca))