/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar
2490.2.5 by Aaron Bentley
Use GraphWalker.unique_ancestor to determine merge base
1
# Copyright (C) 2007 Canonical Ltd
2
#
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
7
#
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11
# GNU General Public License for more details.
12
#
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16
2490.2.30 by Aaron Bentley
Add functionality for tsorting graphs
17
from bzrlib import (
18
    errors,
3052.1.3 by John Arbash Meinel
deprecate revision.is_ancestor, update the callers and the tests.
19
    revision,
3099.3.3 by John Arbash Meinel
Deprecate get_parents() in favor of get_parent_map()
20
    symbol_versioning,
2490.2.30 by Aaron Bentley
Add functionality for tsorting graphs
21
    tsort,
22
    )
2490.2.21 by Aaron Bentley
Rename graph to deprecated_graph
23
from bzrlib.deprecated_graph import (node_distances, select_farthest)
2490.2.1 by Aaron Bentley
Start work on GraphWalker
24
2490.2.25 by Aaron Bentley
Update from review
25
# DIAGRAM of terminology
26
#       A
27
#       /\
28
#      B  C
29
#      |  |\
30
#      D  E F
31
#      |\/| |
32
#      |/\|/
33
#      G  H
34
#
35
# In this diagram, relative to G and H:
36
# A, B, C, D, E are common ancestors.
37
# C, D and E are border ancestors, because each has a non-common descendant.
38
# D and E are least common ancestors because none of their descendants are
39
# common ancestors.
40
# C is not a least common ancestor because its descendant, E, is a common
41
# ancestor.
42
#
43
# The find_unique_lca algorithm will pick A in two steps:
44
# 1. find_lca('G', 'H') => ['D', 'E']
45
# 2. Since len(['D', 'E']) > 1, find_lca('D', 'E') => ['A']
46
47
2988.1.3 by Robert Collins
Add a new repositoy method _generate_text_key_index for use by reconcile/check.
48
class DictParentsProvider(object):
    """A parents provider backed by a plain {key: parents} dict."""

    def __init__(self, ancestry):
        self.ancestry = ancestry

    def __repr__(self):
        return 'DictParentsProvider(%r)' % self.ancestry

    def get_parent_map(self, keys):
        """See _StackedParentsProvider.get_parent_map"""
        known = self.ancestry
        result = {}
        for key in keys:
            if key in known:
                result[key] = known[key]
        return result
61
2490.2.5 by Aaron Bentley
Use GraphWalker.unique_ancestor to determine merge base
62
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
63
class _StackedParentsProvider(object):
64
65
    def __init__(self, parent_providers):
66
        self._parent_providers = parent_providers
67
2490.2.28 by Aaron Bentley
Fix handling of null revision
68
    def __repr__(self):
69
        return "_StackedParentsProvider(%r)" % self._parent_providers
70
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
71
    def get_parent_map(self, keys):
72
        """Get a mapping of keys => parents
73
74
        A dictionary is returned with an entry for each key present in this
75
        source. If this source doesn't have information about a key, it should
76
        not include an entry.
77
78
        [NULL_REVISION] is used as the parent of the first user-committed
79
        revision.  Its parent list is empty.
80
81
        :param keys: An iterable returning keys to check (eg revision_ids)
82
        :return: A dictionary mapping each key to its parents
83
        """
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
84
        found = {}
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
85
        remaining = set(keys)
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
86
        for parents_provider in self._parent_providers:
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
87
            new_found = parents_provider.get_parent_map(remaining)
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
88
            found.update(new_found)
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
89
            remaining.difference_update(new_found)
90
            if not remaining:
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
91
                break
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
92
        return found
93
94
95
class CachingParentsProvider(object):
    """Cache the answers of another parents provider in a dict.

    Each key's parents are fetched from the wrapped provider at most once;
    subsequent lookups — including known misses — are answered from the
    cache.  Useful when the underlying lookup is expensive.
    """

    def __init__(self, parent_provider):
        self._real_provider = parent_provider
        # Theoretically we could use an LRUCache here
        self._cache = {}

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._real_provider)

    def get_parent_map(self, keys):
        """See _StackedParentsProvider.get_parent_map"""
        cache = self._cache
        result = {}
        uncached = set()
        for key in keys:
            try:
                value = cache[key]
            except KeyError:
                uncached.add(key)
            else:
                # A cached value of None records a key the real provider
                # could not answer; leave it out of the result.
                if value is not None:
                    result[key] = value
        if uncached:
            fetched = self._real_provider.get_parent_map(uncached)
            cache.update(fetched)
            result.update(fetched)
            # Remember the misses too, so they are never queried again.
            for key in uncached.difference(fetched):
                cache[key] = None
        return result
132
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
133
2490.2.22 by Aaron Bentley
Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher
134
class Graph(object):
2490.2.10 by Aaron Bentley
Clarify text, remove unused _get_ancestry method
135
    """Provide incremental access to revision graphs.
136
137
    This is the generic implementation; it is intended to be subclassed to
138
    specialize it for other repository types.
139
    """
2490.2.1 by Aaron Bentley
Start work on GraphWalker
140
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
141
    def __init__(self, parents_provider):
        """Construct a Graph that uses several graphs as its input

        This should not normally be invoked directly, because there may be
        specialized implementations for particular repository types.  See
        Repository.get_graph().

        :param parents_provider: An object providing a get_parent_map call
            conforming to the behavior of
            StackedParentsProvider.get_parent_map.
        """
        # Shadow the class-level methods with the provider's own
        # implementations where the provider supplies them.
        for attr in ('get_parents', 'get_parent_map'):
            provided = getattr(parents_provider, attr, None)
            if provided is not None:
                setattr(self, attr, provided)
        self._parents_provider = parents_provider
2490.2.28 by Aaron Bentley
Fix handling of null revision
157
158
    def __repr__(self):
        # Debugging aid: show which parents provider backs this graph.
        return 'Graph(%r)' % self._parents_provider
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
160
161
    def find_lca(self, *revisions):
        """Determine the lowest common ancestors of the provided revisions

        A lowest common ancestor is a common ancestor none of whose
        descendants are common ancestors.  In graphs, unlike trees, there may
        be multiple lowest common ancestors.

        This algorithm has two phases.  Phase 1 identifies border ancestors,
        and phase 2 filters border ancestors to determine lowest common
        ancestors.

        In phase 1, border ancestors are identified, using a breadth-first
        search starting at the bottom of the graph.  Searches are stopped
        whenever a node or one of its descendants is determined to be common.

        In phase 2, the border ancestors are filtered to find the least
        common ancestors.  This is done by searching the ancestries of each
        border ancestor.

        Phase 2 is performed on the principle that a border ancestor that is
        not an ancestor of any other border ancestor is a least common
        ancestor.

        Searches are stopped when they find a node that is determined to be a
        common ancestor of all border ancestors, because this shows that it
        cannot be a descendant of any border ancestor.

        The scaling of this operation should be proportional to
        1. The number of uncommon ancestors
        2. The number of border ancestors
        3. The length of the shortest path between a border ancestor and an
           ancestor of all border ancestors.
        """
        border_ancestors, _common, _searchers = self._find_border_ancestors(
            revisions)
        # Border ancestors may still be reachable from one another; heads()
        # filters the set down to those that are not - phase 2.
        return self.heads(border_ancestors)
2490.2.9 by Aaron Bentley
Fix minimal common ancestor algorithm for non-minimal perhipheral ancestors
199
2490.2.23 by Aaron Bentley
Adapt find_borders to produce a graph difference
200
    def find_difference(self, left_revision, right_revision):
        """Determine the graph difference between two revisions.

        :return: A pair of sets: (ancestors reachable only from
            left_revision, ancestors reachable only from right_revision).
        """
        border, common, searchers = self._find_border_ancestors(
            [left_revision, right_revision])
        # Keep walking a little further to be sure no common nodes were
        # missed by the border search.
        self._search_for_extra_common(common, searchers)
        left_seen = searchers[0].seen
        right_seen = searchers[1].seen
        return (left_seen - right_seen, right_seen - left_seen)
2490.2.23 by Aaron Bentley
Adapt find_borders to produce a graph difference
208
3172.1.2 by Robert Collins
Parent Providers should now implement ``get_parent_map`` returning a
209
    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revisions):
        """Find revision ids of the parents of a list of revisions

        A list is returned of the same length as the input.  Each entry
        is a list of parent ids for the corresponding input revision.

        [NULL_REVISION] is used as the parent of the first user-committed
        revision.  Its parent list is empty.

        If the revision is not present (i.e. a ghost), None is used in place
        of the list of parents.

        Deprecated in bzr 1.2 - please see get_parent_map.
        """
        parents = self.get_parent_map(revisions)
        # BUGFIX: the map built above is named 'parents'; the previous code
        # referenced an undefined name 'parent' here, raising NameError on
        # every call.
        return [parents.get(r, None) for r in revisions]
226
227
    def get_parent_map(self, revisions):
        """Get a map of key:parent_list for revisions.

        This implementation delegates to get_parents, for old parent_providers
        that do not supply get_parent_map.

        :param revisions: A list of revision ids.
        :return: A dict mapping each revision id to its parents; revisions
            for which get_parents yields None (ghosts) are omitted.
        """
        result = {}
        # BUGFIX: get_parents returns a plain positional list of parent
        # tuples (or None for a ghost), not (rev, parents) pairs, so it must
        # be zipped back with the input revisions before unpacking.
        for rev, parents in zip(revisions, self.get_parents(revisions)):
            if parents is not None:
                result[rev] = parents
        return result
238
2490.2.22 by Aaron Bentley
Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher
239
    def _make_breadth_first_searcher(self, revisions):
        # Factory hook: subclasses may substitute a specialized searcher.
        return _BreadthFirstSearcher(revisions, self)
241
2490.2.10 by Aaron Bentley
Clarify text, remove unused _get_ancestry method
242
    def _find_border_ancestors(self, revisions):
        """Find common ancestors with at least one uncommon descendant.

        Border ancestors are identified using a breadth-first
        search starting at the bottom of the graph.  Searches are stopped
        whenever a node or one of its descendants is determined to be common.

        This will scale with the number of uncommon ancestors.

        As well as the border ancestors, a set of seen common ancestors and a
        list of sets of seen ancestors for each input revision is returned.
        This allows calculation of graph difference from the results of this
        operation.

        :param revisions: Revisions to search from.  Must not contain None;
            use NULL_REVISION for the graph origin instead.
        :return: A tuple (border_ancestors, common_ancestors, searchers).
        """
        if None in revisions:
            raise errors.InvalidRevisionId(None, self)
        common_ancestors = set()
        # One breadth-first searcher walking up from each input revision.
        searchers = [self._make_breadth_first_searcher([r])
                     for r in revisions]
        # NOTE(review): active_searchers does not appear to be used below in
        # this block - confirm against the full file before removing.
        active_searchers = searchers[:]
        border_ancestors = set()

        while True:
            # Advance every searcher one generation and pool what they found.
            newly_seen = set()
            for searcher in searchers:
                new_ancestors = searcher.step()
                if new_ancestors:
                    newly_seen.update(new_ancestors)
            new_common = set()
            for revision in newly_seen:
                if revision in common_ancestors:
                    # Not a border ancestor because it was seen as common
                    # already
                    new_common.add(revision)
                    continue
                for searcher in searchers:
                    if revision not in searcher.seen:
                        break
                else:
                    # This is a border because it is a first common that we see
                    # after walking for a while.
                    border_ancestors.add(revision)
                    new_common.add(revision)
            if new_common:
                # Anything already seen below a newly-common node is common
                # too; restart the searchers there so exploration stops.
                for searcher in searchers:
                    new_common.update(searcher.find_seen_ancestors(new_common))
                for searcher in searchers:
                    searcher.start_searching(new_common)
                common_ancestors.update(new_common)

            # Figure out what the searchers will be searching next, and if
            # there is only 1 set being searched, then we are done searching,
            # since all searchers would have to be searching the same data,
            # thus it *must* be in common.
            unique_search_sets = set()
            for searcher in searchers:
                will_search_set = frozenset(searcher._next_query)
                if will_search_set not in unique_search_sets:
                    # This searcher is searching a unique set of nodes, let it
                    unique_search_sets.add(will_search_set)

            if len(unique_search_sets) == 1:
                # All searchers have converged on the same frontier; sanity
                # check that everything on it was marked common, then stop.
                nodes = unique_search_sets.pop()
                uncommon_nodes = nodes.difference(common_ancestors)
                assert not uncommon_nodes, ("Somehow we ended up converging"
                                            " without actually marking them as"
                                            " in common."
                                            "\nStart_nodes: %s"
                                            "\nuncommon_nodes: %s"
                                            % (revisions, uncommon_nodes))
                break
        return border_ancestors, common_ancestors, searchers
2490.2.9 by Aaron Bentley
Fix minimal common ancestor algorithm for non-minimal perhipheral ancestors
314
2776.3.1 by Robert Collins
* Deprecated method ``find_previous_heads`` on
315
    def heads(self, keys):
        """Return the heads from amongst keys.

        This is done by searching the ancestries of each key.  Any key that is
        reachable from another key is not returned; all the others are.

        This operation scales with the relative depth between any two keys. If
        any two keys are completely disconnected all ancestry of both sides
        will be retrieved.

        :param keys: An iterable of keys.
        :return: A set of the heads. Note that as a set there is no ordering
            information. Callers will need to filter their input to create
            order if they need it.
        """
        candidate_heads = set(keys)
        if revision.NULL_REVISION in candidate_heads:
            # NULL_REVISION is only a head if it is the only entry
            candidate_heads.remove(revision.NULL_REVISION)
            if not candidate_heads:
                return set([revision.NULL_REVISION])
        # Zero or one candidate: nothing can dominate anything else.
        if len(candidate_heads) < 2:
            return candidate_heads
        # One searcher walking up from each candidate.
        searchers = dict((c, self._make_breadth_first_searcher([c]))
                          for c in candidate_heads)
        active_searchers = dict(searchers)
        # skip over the actual candidate for each searcher
        for searcher in active_searchers.itervalues():
            searcher.next()
        # The common walker finds nodes that are common to two or more of the
        # input keys, so that we don't access all history when a currently
        # uncommon search point actually meets up with something behind a
        # common search point. Common search points do not keep searches
        # active; they just allow us to make searches inactive without
        # accessing all history.
        common_walker = self._make_breadth_first_searcher([])
        while len(active_searchers) > 0:
            ancestors = set()
            # advance searches
            try:
                common_walker.next()
            except StopIteration:
                # No common points being searched at this time.
                pass
            # .keys() so entries can be deleted from the dict while we
            # iterate (see the KeyError handling just below).
            for candidate in active_searchers.keys():
                try:
                    searcher = active_searchers[candidate]
                except KeyError:
                    # rare case: we deleted candidate in a previous iteration
                    # through this for loop, because it was determined to be
                    # a descendant of another candidate.
                    continue
                try:
                    ancestors.update(searcher.next())
                except StopIteration:
                    # This searcher has exhausted its ancestry.
                    del active_searchers[candidate]
                    continue
            # process found nodes
            new_common = set()
            for ancestor in ancestors:
                if ancestor in candidate_heads:
                    # A candidate was reached from another candidate, so it
                    # is not a head.
                    candidate_heads.remove(ancestor)
                    del searchers[ancestor]
                    if ancestor in active_searchers:
                        del active_searchers[ancestor]
                # it may meet up with a known common node
                if ancestor in common_walker.seen:
                    # some searcher has encountered our known common nodes:
                    # just stop it
                    ancestor_set = set([ancestor])
                    for searcher in searchers.itervalues():
                        searcher.stop_searching_any(ancestor_set)
                else:
                    # or it may have been just reached by all the searchers:
                    for searcher in searchers.itervalues():
                        if ancestor not in searcher.seen:
                            break
                    else:
                        # The final active searcher has just reached this node,
                        # making it be known as a descendant of all candidates,
                        # so we can stop searching it, and any seen ancestors
                        new_common.add(ancestor)
                        for searcher in searchers.itervalues():
                            seen_ancestors =\
                                searcher.find_seen_ancestors([ancestor])
                            searcher.stop_searching_any(seen_ancestors)
            common_walker.start_searching(new_common)
        return candidate_heads
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
403
1551.19.10 by Aaron Bentley
Merge now warns when it encounters a criss-cross
404
    def find_unique_lca(self, left_revision, right_revision,
                        count_steps=False):
        """Find a unique LCA.

        Find lowest common ancestors.  If there is no unique common
        ancestor, find the lowest common ancestors of those ancestors.

        Iteration stops when a unique lowest common ancestor is found.
        The graph origin is necessarily a unique lowest common ancestor.

        Note that None is not an acceptable substitute for NULL_REVISION
        in the input for this method.

        :param count_steps: If True, the return value will be a tuple of
            (unique_lca, steps) where steps is the number of times that
            find_lca was run.  If False, only unique_lca is returned.
        :raises errors.NoCommonAncestor: If some find_lca round yields no
            common ancestors at all.
        """
        candidates = [left_revision, right_revision]
        iterations = 0
        while True:
            iterations += 1
            lca = self.find_lca(*candidates)
            if not lca:
                raise errors.NoCommonAncestor(left_revision, right_revision)
            if len(lca) == 1:
                unique_lca = lca.pop()
                if count_steps:
                    return unique_lca, iterations
                return unique_lca
            # Still ambiguous: repeat the search on the LCA set itself.
            candidates = lca
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
435
3228.4.4 by John Arbash Meinel
Change iter_ancestry to take a group instead of a single node,
436
    def iter_ancestry(self, revision_ids):
        """Iterate the ancestry of this revision.

        :param revision_ids: Nodes to start the search
        :return: Yield tuples mapping a revision_id to its parents for the
            ancestry of revision_id.  Ghosts will be returned with None as
            their parents, and nodes with no parents will have NULL_REVISION
            as their only parent, as defined by get_parent_map.  There will
            also be a node for (NULL_REVISION, ()).
        """
        frontier = set(revision_ids)
        emitted = set()
        while frontier:
            emitted.update(frontier)
            parent_map = self.get_parent_map(frontier)
            successors = set()
            for pair in parent_map.iteritems():
                yield pair
                successors.update(
                    parent for parent in pair[1] if parent not in emitted)
            # Anything queried but absent from the map is a ghost.
            for ghost in frontier.difference(parent_map):
                yield (ghost, None)
            frontier = successors
460
2490.2.31 by Aaron Bentley
Fix iter_topo_order to permit un-included parents
461
    def iter_topo_order(self, revisions):
        """Iterate through the input revisions in topological order.

        This sorting only ensures that parents come before their children.
        An ancestor may sort after a descendant if the relationship is not
        visible in the supplied list of revisions.
        """
        parent_map = self.get_parent_map(revisions)
        return tsort.TopoSorter(parent_map).iter_topo_order()
2490.2.30 by Aaron Bentley
Add functionality for tsorting graphs
470
2653.2.1 by Aaron Bentley
Implement Graph.is_ancestor
471
    def is_ancestor(self, candidate_ancestor, candidate_descendant):
        """Determine whether a revision is an ancestor of another.

        We answer this using heads() as heads() has the logic to perform the
        smallest number of parent lookups to determine the ancestral
        relationship between N revisions.
        """
        # The descendant is the sole head of the pair exactly when the
        # ancestor is dominated by it.
        found_heads = self.heads([candidate_ancestor, candidate_descendant])
        return found_heads == set([candidate_descendant])
2653.2.1 by Aaron Bentley
Implement Graph.is_ancestor
480
3377.3.1 by John Arbash Meinel
Bring in some of the changes from graph_update and graph_optimization
481
    def _search_for_extra_common(self, common, searchers):
        """Make sure that unique nodes are genuinely unique.

        After a simple search, we end up with genuine common nodes, but some
        uncommon nodes might actually be descended from common nodes, and we
        just didn't search far enough.

        We know that we have searched enough when all common search tips are
        descended from all unique (uncommon) nodes because we know that a node
        cannot be an ancestor of its own ancestor.

        :param common: A set of common nodes
        :param searchers: The searchers returned from _find_border_ancestors
        :return: None
        """
        # Basic algorithm...
        #   A) The passed in searchers should all be on the same tips, thus
        #      they should be considered the "common" searchers.
        #   B) We find the difference between the searchers, these are the
        #      "unique" nodes for each side.
        #   C) We do a quick culling so that we only start searching from the
        #      more interesting unique nodes. (A unique ancestor is more
        #      interesting than any of its children.)
        #   D) We start searching for ancestors common to all unique nodes.
        #   E) We have the common searchers stop searching any ancestors of
        #      nodes found by (D)
        #   F) When there are no more common search tips, we stop
        assert len(searchers) == 2, (
            "Algorithm not yet implemented for > 2 searchers")
        common_searchers = searchers
        left_searcher = searchers[0]
        right_searcher = searchers[1]
        unique = left_searcher.seen.symmetric_difference(right_searcher.seen)
        # Step C: drop unique nodes that are children of other unique nodes;
        # searching from the ancestor covers the child as well.
        unique = self._remove_simple_descendants(unique,
                    self.get_parent_map(unique))
        # TODO: jam 20071214 Would it be possible to seed these searchers with
        #       the revisions that we have already seen on each side?
        unique_searchers = [self._make_breadth_first_searcher([r])
                            for r in unique]
        # Common nodes discovered while walking up from the unique nodes.
        common_ancestors_unique = set()

        while True: # If we have no more nodes we have nothing to do
            # XXX: Any nodes here which don't match between searchers indicate
            #      that we have found a genuinely unique node, which would not
            #      have been found by the other searching techniques
            newly_seen_common = set()
            for searcher in common_searchers:
                newly_seen_common.update(searcher.step())
            newly_seen_unique = set()
            for searcher in unique_searchers:
                newly_seen_unique.update(searcher.step())
            new_common_unique = set()
            for revision in newly_seen_unique:
                if revision in common_ancestors_unique:
                    # TODO: Do we need to add it to new_common_unique, since it
                    #       seems to have already been found... ?
                    new_common_unique.add(revision)
                    continue
                for searcher in unique_searchers:
                    if revision not in searcher.seen:
                        break
                else:
                    # This is a border because it is a first common that we see
                    # after walking for a while.
                    new_common_unique.add(revision)
            if newly_seen_common:
                # These are nodes descended from one of the 'common' searchers.
                # Make sure all searchers are on the same page
                for searcher in common_searchers:
                    newly_seen_common.update(
                        searcher.find_seen_ancestors(newly_seen_common))
                for searcher in common_searchers:
                    searcher.start_searching(newly_seen_common)
            if new_common_unique:
                # We found some ancestors that are common, jump all the way to
                # their most ancestral node that we have already seen.
                # (Removed a leftover try/except TypeError wrapper that dropped
                # into pdb here - pure debugging scaffolding.)
                for searcher in unique_searchers:
                    new_common_unique.update(
                        searcher.find_seen_ancestors(new_common_unique))
                # Since these are common, we can grab another set of ancestors
                # that we have seen
                for searcher in common_searchers:
                    new_common_unique.update(
                        searcher.find_seen_ancestors(new_common_unique))

                # Now we have a complete set of common nodes which are
                # ancestors of the unique nodes.
                # We can tell all of the unique searchers to start at these
                # nodes, and tell all of the common searchers to *stop*
                # searching these nodes
                for searcher in unique_searchers:
                    searcher.start_searching(new_common_unique)
                for searcher in common_searchers:
                    searcher.stop_searching_any(new_common_unique)
                common_ancestors_unique.update(new_common_unique)
            for searcher in unique_searchers:
                if searcher._next_query:
                    # We have something to look for
                    break
            else:
                # All unique_searchers have stopped searching
                break
            for searcher in common_searchers:
                if searcher._next_query:
                    break
            else:
                # All common searchers have stopped searching
                break
605
606
607
    def _remove_simple_descendants(self, revisions, parent_map):
608
        """remove revisions which are children of other ones in the set
609
610
        This doesn't do any graph searching, it just checks the immediate
611
        parent_map to find if there are any children which can be removed.
612
613
        :param revisions: A set of revision_ids
614
        :return: A set of revision_ids with the children removed
615
        """
616
        simple_ancestors = revisions.copy()
617
        # TODO: jam 20071214 we *could* restrict it to searching only the
618
        #       parent_map of revisions already present in 'revisions', but
619
        #       considering the general use case, I think this is actually
620
        #       better.
621
622
        # This is the same as the following loop. I don't know that it is any
623
        # faster.
624
        ## simple_ancestors.difference_update(r for r, p_ids in parent_map.iteritems()
625
        ##     if p_ids is not None and revisions.intersection(p_ids))
626
        ## return simple_ancestors
627
628
        # Yet Another Way, invert the parent map (which can be cached)
629
        ## descendants = {}
630
        ## for revision_id, parent_ids in parent_map.iteritems():
631
        ##   for p_id in parent_ids:
632
        ##       descendants.setdefault(p_id, []).append(revision_id)
633
        ## for revision in revisions.intersection(descendants):
634
        ##   simple_ancestors.difference_update(descendants[revision])
635
        ## return simple_ancestors
636
        for revision, parent_ids in parent_map.iteritems():
637
            if parent_ids is None:
638
                continue
639
            for parent_id in parent_ids:
640
                if parent_id in revisions:
641
                    # This node has a parent present in the set, so we can
642
                    # remove it
643
                    simple_ancestors.discard(revision)
644
                    break
645
        return simple_ancestors
646
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
647
2911.4.1 by Robert Collins
Factor out the Graph.heads() cache from _RevisionTextVersionCache for reuse, and use it in commit.
648
class HeadsCache(object):
    """A cache of results for graph heads calls."""

    def __init__(self, graph):
        # The graph whose heads() answers are memoised.
        self.graph = graph
        # Maps frozenset(keys) -> the set returned by graph.heads().
        self._heads = {}

    def heads(self, keys):
        """Return the heads of keys.

        This matches the API of Graph.heads(), specifically the return value
        is a set which can be mutated, and ordering of the input is not
        preserved in the output.

        :see also: Graph.heads.
        :param keys: The keys to calculate heads for.
        :return: A set containing the heads, which may be mutated without
            affecting future lookups.
        """
        cache_key = frozenset(keys)
        if cache_key in self._heads:
            # Hand back a copy so callers can mutate it freely.
            return set(self._heads[cache_key])
        answer = self.graph.heads(cache_key)
        self._heads[cache_key] = answer
        return set(answer)
674
675
3224.1.20 by John Arbash Meinel
Reduce the number of cache misses by caching known heads answers
676
class FrozenHeadsCache(object):
    """Cache heads() calls, assuming the caller won't modify them."""

    def __init__(self, graph):
        # The graph whose heads() answers are memoised.
        self.graph = graph
        # Maps frozenset(keys) -> frozenset(heads).
        self._heads = {}

    def heads(self, keys):
        """Return the heads of keys.

        Similar to Graph.heads(). The main difference is that the return
        value is a frozen set which cannot be mutated.

        :see also: Graph.heads.
        :param keys: The keys to calculate heads for.
        :return: A frozenset containing the heads.
        """
        cache_key = frozenset(keys)
        if cache_key not in self._heads:
            self._heads[cache_key] = frozenset(self.graph.heads(cache_key))
        return self._heads[cache_key]

    def cache(self, keys, heads):
        """Store a known value."""
        self._heads[frozenset(keys)] = frozenset(heads)
704
705
2490.2.22 by Aaron Bentley
Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher
706
class _BreadthFirstSearcher(object):
    """Parallel search breadth-first the ancestry of revisions.

    This class implements the iterator protocol, but additionally
    1. provides a set of seen ancestors, and
    2. allows some ancestries to be unsearched, via stop_searching_any

    The searcher operates in one of two result modes, recorded in
    ``_returning``: 'next' returns the raw query set *before* parentage is
    looked up, while 'next_with_ghosts' returns (present, ghosts) *after*
    the lookup.  Several methods branch on the active mode because the
    internal state means different things in each.
    """

    def __init__(self, revisions, parents_provider):
        # Number of parent-map queries performed so far.
        self._iterations = 0
        # The revisions that the next advance will ask the provider about.
        self._next_query = set(revisions)
        # All revision ids encountered so far (public attribute).
        self.seen = set()
        # Keys the search was started from; consumed by get_result().
        self._started_keys = set(self._next_query)
        # Keys explicitly stopped, plus discovered ghosts; for get_result().
        self._stopped_keys = set()
        # Object answering get_parent_map(revisions) -> {rev_id: parents}.
        self._parents_provider = parents_provider
        # Active result mode; see the class docstring.
        self._returning = 'next_with_ghosts'
        # Results of the most recent _do_query; only meaningful while in
        # 'next_with_ghosts' mode.
        self._current_present = set()
        self._current_ghosts = set()
        self._current_parents = {}

    def __repr__(self):
        # _iterations == 0 means no query has been issued yet.
        if self._iterations:
            prefix = "searching"
        else:
            prefix = "starting"
        search = '%s=%r' % (prefix, list(self._next_query))
        return ('_BreadthFirstSearcher(iterations=%d, %s,'
                ' seen=%r)' % (self._iterations, search, list(self.seen)))

    def get_result(self):
        """Get a SearchResult for the current state of this searcher.

        :return: A SearchResult for this search so far. The SearchResult is
            static - the search can be advanced and the search result will not
            be invalidated or altered.
        """
        if self._returning == 'next':
            # We have to know the current nodes' children to be able to list
            # the exclude keys for them. However, while we could have a second
            # look-ahead result buffer and shuffle things around, this method
            # is typically only called once per search - when memoising the
            # results of the search.
            # NOTE: 'next' here shadows the builtin; kept as-is.
            found, ghosts, next, parents = self._do_query(self._next_query)
            # pretend we didn't query: perhaps we should tweak _do_query to be
            # entirely stateless?
            self.seen.difference_update(next)
            next_query = next.union(ghosts)
        else:
            next_query = self._next_query
        # Everything seen but not stopped and not still pending is included.
        excludes = self._stopped_keys.union(next_query)
        included_keys = self.seen.difference(excludes)
        return SearchResult(self._started_keys, excludes, len(included_keys),
            included_keys)

    def step(self):
        """Advance one iteration, returning () instead of raising at the end."""
        try:
            return self.next()
        except StopIteration:
            return ()

    def next(self):
        """Return the next ancestors of this revision.

        Ancestors are returned in the order they are seen in a breadth-first
        traversal.  No ancestor will be returned more than once. Ancestors are
        returned before their parentage is queried, so ghosts and missing
        revisions (including the start revisions) are included in the result.
        This can save a round trip in LCA style calculation by allowing
        convergence to be detected without reading the data for the revision
        the convergence occurs on.

        :return: A set of revision_ids.
        """
        if self._returning != 'next':
            # switch to returning the query, not the results.
            self._returning = 'next'
            self._iterations += 1
        else:
            self._advance()
        if len(self._next_query) == 0:
            raise StopIteration()
        # We have seen what we're querying at this point as we are returning
        # the query, not the results.
        self.seen.update(self._next_query)
        return self._next_query

    def next_with_ghosts(self):
        """Return the next found ancestors, with ghosts split out.

        Ancestors are returned in the order they are seen in a breadth-first
        traversal.  No ancestor will be returned more than once. Ancestors are
        returned only after asking for their parents, which allows us to detect
        which revisions are ghosts and which are not.

        :return: A tuple with (present ancestors, ghost ancestors) sets.
        """
        if self._returning != 'next_with_ghosts':
            # switch to returning the results, not the current query.
            self._returning = 'next_with_ghosts'
            # Consume the query that next() already handed out.
            self._advance()
        if len(self._next_query) == 0:
            raise StopIteration()
        self._advance()
        return self._current_present, self._current_ghosts

    def _advance(self):
        """Advance the search.

        Updates self.seen, self._next_query, self._current_present,
        self._current_ghosts, self._current_parents and self._iterations.
        """
        self._iterations += 1
        found, ghosts, next, parents = self._do_query(self._next_query)
        self._current_present = found
        self._current_ghosts = ghosts
        self._next_query = next
        self._current_parents = parents
        # ghosts are implicit stop points, otherwise the search cannot be
        # repeated when ghosts are filled.
        self._stopped_keys.update(ghosts)

    def _do_query(self, revisions):
        """Query for revisions.

        Adds revisions to the seen set.

        :param revisions: Revisions to query.
        :return: A tuple: (set(found_revisions), set(ghost_revisions),
           set(parents_of_found_revisions), dict(found_revisions:parents)).
        """
        found_parents = set()
        parents_of_found = set()
        # revisions may contain nodes that point to other nodes in revisions:
        # we want to filter them out.
        self.seen.update(revisions)
        parent_map = self._parents_provider.get_parent_map(revisions)
        for rev_id, parents in parent_map.iteritems():
            found_parents.add(rev_id)
            parents_of_found.update(p for p in parents if p not in self.seen)
        # Anything the provider did not answer for is a ghost.
        ghost_parents = revisions - found_parents
        return found_parents, ghost_parents, parents_of_found, parent_map

    def __iter__(self):
        return self

    def find_seen_ancestors(self, revisions):
        """Find ancestors of these revisions that have already been seen.

        Runs a fresh sub-search from ``revisions`` and prunes any branch as
        soon as it leaves self.seen, collecting the nodes that stay inside.
        """
        searcher = _BreadthFirstSearcher(revisions, self._parents_provider)
        seen_ancestors = set()
        for ancestors in searcher:
            for ancestor in ancestors:
                # stop_nodes is a singleton or empty per ancestor; the
                # sub-search is cut at the first unseen node on each path.
                stop_nodes = set()
                if ancestor not in self.seen:
                    stop_nodes.add(ancestor)
                else:
                    seen_ancestors.add(ancestor)
                searcher.stop_searching_any(stop_nodes)
        return seen_ancestors

    def stop_searching_any(self, revisions):
        """
        Remove any of the specified revisions from the search list.

        None of the specified revisions are required to be present in the
        search list.  In this case, the call is a no-op.

        :return: The set of revisions that were actually stopped.
        """
        revisions = frozenset(revisions)
        if self._returning == 'next':
            # Pending-query mode: just drop them from the pending set.
            stopped = self._next_query.intersection(revisions)
            self._next_query = self._next_query.difference(revisions)
        else:
            # Results mode: remove from the current results, then prune any
            # queued parent that is *only* reachable via stopped revisions.
            stopped_present = self._current_present.intersection(revisions)
            stopped = stopped_present.union(
                self._current_ghosts.intersection(revisions))
            self._current_present.difference_update(stopped)
            self._current_ghosts.difference_update(stopped)
            # stopping 'x' should stop returning parents of 'x', but
            # not if 'y' always references those same parents
            stop_rev_references = {}
            for rev in stopped_present:
                for parent_id in self._current_parents[rev]:
                    if parent_id not in stop_rev_references:
                        stop_rev_references[parent_id] = 0
                    stop_rev_references[parent_id] += 1
            # if only the stopped revisions reference it, the ref count will be
            # 0 after this loop
            for parents in self._current_parents.itervalues():
                for parent_id in parents:
                    try:
                        stop_rev_references[parent_id] -= 1
                    except KeyError:
                        pass
            stop_parents = set()
            for rev_id, refs in stop_rev_references.iteritems():
                if refs == 0:
                    stop_parents.add(rev_id)
            self._next_query.difference_update(stop_parents)
        self._stopped_keys.update(stopped)
        return stopped

    def start_searching(self, revisions):
        """Add revisions to the search.

        The parents of revisions will be returned from the next call to next()
        or next_with_ghosts(). If next_with_ghosts was the most recently used
        next* call then the return value is the result of looking up the
        ghost/not ghost status of revisions. (A tuple (present, ghosted)).

        In 'next' mode this returns None.
        """
        revisions = frozenset(revisions)
        self._started_keys.update(revisions)
        # Only revisions we have never seen need to enter the pending query.
        new_revisions = revisions.difference(self.seen)
        revs, ghosts, query, parents = self._do_query(revisions)
        self._stopped_keys.update(ghosts)
        if self._returning == 'next':
            self._next_query.update(new_revisions)
        else:
            # perform a query on revisions
            self._current_present.update(revs)
            self._current_ghosts.update(ghosts)
            self._next_query.update(query)
            self._current_parents.update(parents)
            return revs, ghosts
3184.1.6 by Robert Collins
Create a SearchResult object which can be used as a replacement for sets.
928
929
930
class SearchResult(object):
    """Outcome of a breadth first search over a revision graph.

    The result exposes two views of the search: the concrete set of keys it
    visited, and a compact "recipe" from which an equivalent search can be
    re-run later without holding on to every found key.
    """

    def __init__(self, start_keys, exclude_keys, key_count, keys):
        """Create a SearchResult.

        :param start_keys: The keys the search started at.
        :param exclude_keys: The keys the search excludes.
        :param key_count: The total number of keys (from start to but not
            including exclude).
        :param keys: The keys the search found. Note that in future we may get
            a SearchResult from a smart server, in which case the keys list is
            not necessarily immediately available.
        """
        # The recipe is intentionally small: it omits the found keys so it can
        # be transmitted or stored cheaply.
        recipe = (start_keys, exclude_keys, key_count)
        self._recipe = recipe
        # Freeze the found keys so callers cannot mutate the result.
        self._keys = frozenset(keys)

    def get_recipe(self):
        """Return a recipe that can be used to replay this search.

        The recipe allows reconstruction of the same results at a later date
        without knowing all the found keys. The essential elements are a list
        of keys to start at and to stop at. In order to give reproducible
        results when ghosts are encountered by a search they are automatically
        added to the exclude list (or else ghost filling may alter the
        results).

        :return: A tuple (start_keys_set, exclude_keys_set, revision_count). To
            recreate the results of this search, create a breadth first
            searcher on the same graph starting at start_keys. Then call next()
            (or next_with_ghosts()) repeatedly, and on every result, call
            stop_searching_any on any keys from the exclude_keys set. The
            revision_count value acts as a trivial cross-check - the found
            revisions of the new search should have as many elements as
            revision_count. If it does not, then additional revisions have been
            ghosted since the search was executed the first time and the second
            time.
        """
        return self._recipe

    def get_keys(self):
        """Return the keys found in this search.

        :return: A set of keys.
        """
        return self._keys
980