# Copyright (C) 2007 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

from bzrlib import (
    errors,
    tsort,
    )
from bzrlib.deprecated_graph import (node_distances, select_farthest)
from bzrlib.revision import NULL_REVISION

# DIAGRAM of terminology
#       A
#       /\
#      B  C
#      |  |\
#      D  E F
#      |\/| |
#      |/\|/
#      G  H
#
# In this diagram, relative to G and H:
# A, B, C, D, E are common ancestors.
# C, D and E are border ancestors, because each has a non-common descendant.
# D and E are least common ancestors because none of their descendants are
# common ancestors.
# C is not a least common ancestor because its descendant, E, is a common
# ancestor.
#
# The find_unique_lca algorithm will pick A in two steps:
# 1. find_lca('G', 'H') => ['D', 'E']
# 2. Since len(['D', 'E']) > 1, find_lca('D', 'E') => ['A']
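
# The worked example below is an illustrative sketch (not part of the
# original module): it expresses the diagram above as a plain parents dict
# and shows the two-step LCA search, assuming the Graph and
# DictParentsProvider classes defined later in this file.
#
#   ancestry = {'A': [], 'B': ['A'], 'C': ['A'], 'D': ['B'], 'E': ['C'],
#               'F': ['C'], 'G': ['D', 'E'], 'H': ['D', 'E', 'F']}
#   graph = Graph(DictParentsProvider(ancestry))
#   graph.find_lca('G', 'H')         # => set(['D', 'E'])
#   graph.find_unique_lca('G', 'H')  # => 'A'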


class DictParentsProvider(object):

    def __init__(self, ancestry):
        self.ancestry = ancestry

    def __repr__(self):
        return 'DictParentsProvider(%r)' % self.ancestry

    def get_parents(self, revisions):
        return [self.ancestry.get(r, None) for r in revisions]
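
# Illustrative usage sketch (revision ids are made up): a DictParentsProvider
# answers get_parents straight from its dict, using None for ghosts.
#   provider = DictParentsProvider({'rev1': [], 'rev2': ['rev1']})
#   provider.get_parents(['rev2', 'ghost'])  # => [['rev1'], None]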


class _StackedParentsProvider(object):

    def __init__(self, parent_providers):
        self._parent_providers = parent_providers

    def __repr__(self):
        return "_StackedParentsProvider(%r)" % self._parent_providers

    def get_parents(self, revision_ids):
        """Find revision ids of the parents of a list of revisions

        A list is returned of the same length as the input. Each entry
        is a list of parent ids for the corresponding input revision.

        [NULL_REVISION] is used as the parent of the first user-committed
        revision. Its parent list is empty.

        If the revision is not present (i.e. a ghost), None is used in place
        of the list of parents.
        """
        found = {}
        for parents_provider in self._parent_providers:
            pending_revisions = [r for r in revision_ids if r not in found]
            parent_list = parents_provider.get_parents(pending_revisions)
            new_found = dict((k, v) for k, v in zip(pending_revisions,
                             parent_list) if v is not None)
            found.update(new_found)
            if len(found) == len(revision_ids):
                break
        return [found.get(r, None) for r in revision_ids]
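
# Illustrative sketch (revision ids are made up): a revision that is a ghost
# in the first provider is resolved by the next provider in the stack.
#   stacked = _StackedParentsProvider([
#       DictParentsProvider({'b': ['a']}),
#       DictParentsProvider({'a': []}),
#       ])
#   stacked.get_parents(['b', 'a', 'ghost'])  # => [['a'], [], None]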
"""Provide incremental access to revision graphs.
93
This is the generic implementation; it is intended to be subclassed to
94
specialize it for other repository types.
97
def __init__(self, parents_provider):
98
"""Construct a Graph that uses several graphs as its input
100
This should not normally be invoked directly, because there may be
101
specialized implementations for particular repository types. See
102
Repository.get_graph()
104
:param parents_provider: An object providing a get_parents call
105
conforming to the behavior of StackedParentsProvider.get_parents
107
self.get_parents = parents_provider.get_parents
108
self._parents_provider = parents_provider
111
return 'Graph(%r)' % self._parents_provider
113

    def find_lca(self, *revisions):
        """Determine the lowest common ancestors of the provided revisions

        A lowest common ancestor is a common ancestor none of whose
        descendants are common ancestors. In graphs, unlike trees, there may
        be multiple lowest common ancestors.

        This algorithm has two phases. Phase 1 identifies border ancestors,
        and phase 2 filters border ancestors to determine lowest common
        ancestors.

        In phase 1, border ancestors are identified, using a breadth-first
        search starting at the bottom of the graph. Searches are stopped
        whenever a node or one of its descendants is determined to be common.

        In phase 2, the border ancestors are filtered to find the least
        common ancestors. This is done by searching the ancestries of each
        border ancestor.

        Phase 2 is performed on the principle that a border ancestor that is
        not an ancestor of any other border ancestor is a least common
        ancestor.

        Searches are stopped when they find a node that is determined to be a
        common ancestor of all border ancestors, because this shows that it
        cannot be a descendant of any border ancestor.

        The scaling of this operation should be proportional to
        1. The number of uncommon ancestors
        2. The number of border ancestors
        3. The length of the shortest path between a border ancestor and an
           ancestor of all border ancestors.
        """
        border_common, common, sides = self._find_border_ancestors(revisions)
        # We may have common ancestors that can be reached from each other.
        # - ask for the heads of them to filter it down to only ones that
        #   cannot be reached from each other - phase 2.
        return self.heads(border_common)

    def find_difference(self, left_revision, right_revision):
        """Determine the graph difference between two revisions"""
        border, common, (left, right) = self._find_border_ancestors(
            [left_revision, right_revision])
        return (left.difference(right).difference(common),
                right.difference(left).difference(common))
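
    # Illustrative sketch, reusing the 'ancestry' dict and 'graph' from the
    # module-level example above: the result pairs the ancestors unique to
    # each side, with everything common removed.
    #   graph.find_difference('G', 'H')  # => (set(['G']), set(['H', 'F']))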

    def _make_breadth_first_searcher(self, revisions):
        return _BreadthFirstSearcher(revisions, self)

    def _find_border_ancestors(self, revisions):
        """Find common ancestors with at least one uncommon descendant.

        Border ancestors are identified using a breadth-first
        search starting at the bottom of the graph. Searches are stopped
        whenever a node or one of its descendants is determined to be common.

        This will scale with the number of uncommon ancestors.

        As well as the border ancestors, a set of seen common ancestors and a
        list of sets of seen ancestors for each input revision is returned.
        This allows calculation of graph difference from the results of this
        operation.
        """
        if None in revisions:
            raise errors.InvalidRevisionId(None, self)
        common_searcher = self._make_breadth_first_searcher([])
        common_ancestors = set()
        searchers = [self._make_breadth_first_searcher([r])
                     for r in revisions]
        active_searchers = searchers[:]
        border_ancestors = set()

        def update_common(searcher, revision):
            # Stop searching any already-seen ancestors of this common
            # revision and hand the stopped tips over to the common searcher.
            w_seen_ancestors = searcher.find_seen_ancestors(revision)
            stopped = searcher.stop_searching_any(w_seen_ancestors)
            common_ancestors.update(w_seen_ancestors)
            common_searcher.start_searching(stopped)

        while True:
            if len(active_searchers) == 0:
                return border_ancestors, common_ancestors, [s.seen for s in
                                                            searchers]
            try:
                new_common = common_searcher.next()
                common_ancestors.update(new_common)
            except StopIteration:
                pass
            else:
                for searcher in active_searchers:
                    for revision in new_common.intersection(searcher.seen):
                        update_common(searcher, revision)

            newly_seen = set()
            new_active_searchers = []
            for searcher in active_searchers:
                try:
                    newly_seen.update(searcher.next())
                except StopIteration:
                    pass
                else:
                    new_active_searchers.append(searcher)
            active_searchers = new_active_searchers
            for revision in newly_seen:
                if revision in common_ancestors:
                    for searcher in searchers:
                        update_common(searcher, revision)
                    continue
                for searcher in searchers:
                    if revision not in searcher.seen:
                        break
                else:
                    border_ancestors.add(revision)
                    for searcher in searchers:
                        update_common(searcher, revision)

    def heads(self, keys):
        """Return the heads from amongst keys.

        This is done by searching the ancestries of each key. Any key that is
        reachable from another key is not returned; all the others are.

        This operation scales with the relative depth between any two keys. If
        any two keys are completely disconnected all ancestry of both sides
        will be retrieved.

        :param keys: An iterable of keys.
        :return: A set of the heads. Note that as a set there is no ordering
            information. Callers will need to filter their input to create
            order if they need it.
        """
        candidate_heads = set(keys)
        if len(candidate_heads) < 2:
            return candidate_heads
        searchers = dict((c, self._make_breadth_first_searcher([c]))
                          for c in candidate_heads)
        active_searchers = dict(searchers)
        # skip over the actual candidate for each searcher
        for searcher in active_searchers.itervalues():
            searcher.next()
        # The common walker finds nodes that are common to two or more of the
        # input keys, so that we don't access all history when a currently
        # uncommon search point actually meets up with something behind a
        # common search point. Common search points do not keep searches
        # active; they just allow us to make searches inactive without
        # accessing all history.
        common_walker = self._make_breadth_first_searcher([])
        while len(active_searchers) > 0:
            ancestors = set()
            # advance searches
            try:
                common_walker.next()
            except StopIteration:
                # No common points being searched at this time.
                pass
            for candidate in active_searchers.keys():
                try:
                    searcher = active_searchers[candidate]
                except KeyError:
                    # rare case: we deleted candidate in a previous iteration
                    # through this for loop, because it was determined to be
                    # a descendant of another candidate.
                    continue
                try:
                    ancestors.update(searcher.next())
                except StopIteration:
                    del active_searchers[candidate]
                    continue
            # process found nodes
            new_common = set()
            for ancestor in ancestors:
                if ancestor in candidate_heads:
                    candidate_heads.remove(ancestor)
                    del searchers[ancestor]
                    if ancestor in active_searchers:
                        del active_searchers[ancestor]
                # it may meet up with a known common node
                if ancestor in common_walker.seen:
                    # some searcher has encountered our known common nodes:
                    # just stop it
                    ancestor_set = set([ancestor])
                    for searcher in searchers.itervalues():
                        searcher.stop_searching_any(ancestor_set)
                else:
                    # or it may have been just reached by all the searchers:
                    for searcher in searchers.itervalues():
                        if ancestor not in searcher.seen:
                            break
                    else:
                        # The final active searcher has just reached this node,
                        # making it be known as a descendant of all candidates,
                        # so we can stop searching it, and any seen ancestors
                        new_common.add(ancestor)
                        for searcher in searchers.itervalues():
                            seen_ancestors = \
                                searcher.find_seen_ancestors(ancestor)
                            searcher.stop_searching_any(seen_ancestors)
            common_walker.start_searching(new_common)
        return candidate_heads
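
    # Illustrative sketch, reusing the 'graph' from the module-level example
    # above: a key reachable from another key is dropped.
    #   graph.heads(['G', 'D'])  # => set(['G'])       (D is an ancestor of G)
    #   graph.heads(['D', 'E'])  # => set(['D', 'E'])  (neither reaches the other)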

    def find_unique_lca(self, left_revision, right_revision):
        """Find a unique LCA.

        Find lowest common ancestors. If there is no unique common
        ancestor, find the lowest common ancestors of those ancestors.

        Iteration stops when a unique lowest common ancestor is found.
        The graph origin is necessarily a unique lowest common ancestor.

        Note that None is not an acceptable substitute for NULL_REVISION
        in the input for this method.
        """
        revisions = [left_revision, right_revision]
        while True:
            lca = self.find_lca(*revisions)
            if len(lca) == 1:
                return lca.pop()
            if len(lca) == 0:
                raise errors.NoCommonAncestor(left_revision, right_revision)
            revisions = lca

    def iter_topo_order(self, revisions):
        """Iterate through the input revisions in topological order.

        This sorting only ensures that parents come before their children.
        An ancestor may sort after a descendant if the relationship is not
        visible in the supplied list of revisions.
        """
        sorter = tsort.TopoSorter(zip(revisions, self.get_parents(revisions)))
        return sorter.iter_topo_order()
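
    # Illustrative sketch, reusing the 'graph' from the module-level example
    # above; only the parents-before-children property is guaranteed.
    #   list(graph.iter_topo_order(['G', 'D', 'B']))  # => e.g. ['B', 'D', 'G']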

    def is_ancestor(self, candidate_ancestor, candidate_descendant):
        """Determine whether a revision is an ancestor of another.

        We answer this using heads() as heads() has the logic to perform the
        smallest number of parent lookups to determine the ancestral
        relationship between N revisions.
        """
        return set([candidate_descendant]) == self.heads(
            [candidate_ancestor, candidate_descendant])
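
    # Illustrative sketch, reusing the 'graph' from the module-level example:
    #   graph.is_ancestor('A', 'G')  # => True
    #   graph.is_ancestor('F', 'G')  # => False (F is only an ancestor of H)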


class HeadsCache(object):
    """A cache of results for graph heads calls."""

    def __init__(self, graph):
        self.graph = graph
        self._heads = {}

    def heads(self, keys):
        """Return the heads of keys.

        This matches the API of Graph.heads(), specifically the return value
        is a set which can be mutated, and ordering of the input is not
        preserved in the output.

        :see also: Graph.heads.
        :param keys: The keys to calculate heads for.
        :return: A set containing the heads, which may be mutated without
            affecting future lookups.
        """
        keys = frozenset(keys)
        try:
            return set(self._heads[keys])
        except KeyError:
            heads = self.graph.heads(keys)
            self._heads[keys] = heads
            return set(heads)
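
    # Illustrative sketch, reusing the 'graph' from the module-level example:
    # repeated queries for the same frozenset of keys are served from the cache.
    #   cache = HeadsCache(graph)
    #   cache.heads(['D', 'E'])  # computed via graph.heads() and memoized
    #   cache.heads(['E', 'D'])  # same frozenset key, answered from the cache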


class _BreadthFirstSearcher(object):
    """Parallel search breadth-first the ancestry of revisions.

    This class implements the iterator protocol, but additionally
    1. provides a set of seen ancestors, and
    2. allows some ancestries to be unsearched, via stop_searching_any
    """

    def __init__(self, revisions, parents_provider):
        self._start = set(revisions)
        self._search_revisions = None
        self.seen = set(revisions)
        self._parents_provider = parents_provider

    def __repr__(self):
        return ('_BreadthFirstSearcher(self._search_revisions=%r,'
                ' self.seen=%r)' % (self._search_revisions, self.seen))
"""Return the next ancestors of this revision.
427
Ancestors are returned in the order they are seen in a breadth-first
428
traversal. No ancestor will be returned more than once.
430
if self._search_revisions is None:
431
self._search_revisions = self._start
433
new_search_revisions = set()
434
for parents in self._parents_provider.get_parents(
435
self._search_revisions):
438
new_search_revisions.update(p for p in parents if
440
self._search_revisions = new_search_revisions
441
if len(self._search_revisions) == 0:
442
raise StopIteration()
443
self.seen.update(self._search_revisions)
444
return self._search_revisions
449
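
    # Illustrative sketch, reusing the 'ancestry' dict from the module-level
    # example above: each call to next() returns one breadth-first layer.
    #   searcher = _BreadthFirstSearcher(['G'], DictParentsProvider(ancestry))
    #   searcher.next()  # => set(['G'])
    #   searcher.next()  # => set(['D', 'E'])
    #   searcher.next()  # => set(['B', 'C'])
    #   searcher.next()  # => set(['A'])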

    def find_seen_ancestors(self, revision):
        """Find ancestors of this revision that have already been seen."""
        searcher = _BreadthFirstSearcher([revision], self._parents_provider)
        seen_ancestors = set()
        for ancestors in searcher:
            for ancestor in ancestors:
                if ancestor not in self.seen:
                    searcher.stop_searching_any([ancestor])
                else:
                    seen_ancestors.add(ancestor)
        return seen_ancestors

    def stop_searching_any(self, revisions):
        """
        Remove any of the specified revisions from the search list.

        None of the specified revisions are required to be present in the
        search list. In this case, the call is a no-op.
        """
        stopped = self._search_revisions.intersection(revisions)
        self._search_revisions = self._search_revisions.difference(revisions)
        return stopped

    def start_searching(self, revisions):
        if self._search_revisions is None:
            self._start = set(revisions)
        else:
            self._search_revisions.update(revisions.difference(self.seen))
        self.seen.update(revisions)
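
# Illustrative sketch, reusing the 'ancestry' dict from the module-level
# example above: stop_searching_any prunes branches from a running search.
#   searcher = _BreadthFirstSearcher(['H'], DictParentsProvider(ancestry))
#   searcher.next()                          # => set(['H'])
#   searcher.next()                          # => set(['D', 'E', 'F'])
#   searcher.stop_searching_any(['D', 'E'])  # => set(['D', 'E'])
#   searcher.next()                          # => set(['C'])  (only F's line is searched)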