/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar
2490.2.5 by Aaron Bentley
Use GraphWalker.unique_ancestor to determine merge base
1
# Copyright (C) 2007 Canonical Ltd
2
#
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License as published by
5
# the Free Software Foundation; either version 2 of the License, or
6
# (at your option) any later version.
7
#
8
# This program is distributed in the hope that it will be useful,
9
# but WITHOUT ANY WARRANTY; without even the implied warranty of
10
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11
# GNU General Public License for more details.
12
#
13
# You should have received a copy of the GNU General Public License
14
# along with this program; if not, write to the Free Software
15
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16
2490.2.30 by Aaron Bentley
Add functionality for tsorting graphs
17
from bzrlib import (
18
    errors,
3052.1.3 by John Arbash Meinel
deprecate revision.is_ancestor, update the callers and the tests.
19
    revision,
3099.3.3 by John Arbash Meinel
Deprecate get_parents() in favor of get_parent_map()
20
    symbol_versioning,
3377.3.10 by John Arbash Meinel
Tweak _BreadthFirstSearcher.find_seen_ancestors()
21
    trace,
2490.2.30 by Aaron Bentley
Add functionality for tsorting graphs
22
    tsort,
23
    )
2490.2.21 by Aaron Bentley
Rename graph to deprecated_graph
24
from bzrlib.deprecated_graph import (node_distances, select_farthest)
2490.2.1 by Aaron Bentley
Start work on GraphWalker
25
2490.2.25 by Aaron Bentley
Update from review
26
# DIAGRAM of terminology
27
#       A
28
#       /\
29
#      B  C
30
#      |  |\
31
#      D  E F
32
#      |\/| |
33
#      |/\|/
34
#      G  H
35
#
36
# In this diagram, relative to G and H:
37
# A, B, C, D, E are common ancestors.
38
# C, D and E are border ancestors, because each has a non-common descendant.
39
# D and E are least common ancestors because none of their descendants are
40
# common ancestors.
41
# C is not a least common ancestor because its descendant, E, is a common
42
# ancestor.
43
#
44
# The find_unique_lca algorithm will pick A in two steps:
45
# 1. find_lca('G', 'H') => ['D', 'E']
46
# 2. Since len(['D', 'E']) > 1, find_lca('D', 'E') => ['A']
47
48
2988.1.3 by Robert Collins
Add a new repositoy method _generate_text_key_index for use by reconcile/check.
49
class DictParentsProvider(object):
    """A parents provider for Graph objects.

    Parent information is served straight out of a plain dict, which makes
    this handy for tests and for stacking on top of slower providers.
    """

    def __init__(self, ancestry):
        # ancestry maps each key to its tuple/list of parent keys.
        self.ancestry = ancestry

    def __repr__(self):
        return 'DictParentsProvider(%r)' % self.ancestry

    def get_parent_map(self, keys):
        """See _StackedParentsProvider.get_parent_map"""
        known = self.ancestry
        # Keys absent from the dict are simply omitted from the result.
        return dict((key, known[key]) for key in keys if key in known)
62
2490.2.5 by Aaron Bentley
Use GraphWalker.unique_ancestor to determine merge base
63
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
64
class _StackedParentsProvider(object):
65
66
    def __init__(self, parent_providers):
67
        self._parent_providers = parent_providers
68
2490.2.28 by Aaron Bentley
Fix handling of null revision
69
    def __repr__(self):
70
        return "_StackedParentsProvider(%r)" % self._parent_providers
71
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
72
    def get_parent_map(self, keys):
73
        """Get a mapping of keys => parents
74
75
        A dictionary is returned with an entry for each key present in this
76
        source. If this source doesn't have information about a key, it should
77
        not include an entry.
78
79
        [NULL_REVISION] is used as the parent of the first user-committed
80
        revision.  Its parent list is empty.
81
82
        :param keys: An iterable returning keys to check (eg revision_ids)
83
        :return: A dictionary mapping each key to its parents
84
        """
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
85
        found = {}
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
86
        remaining = set(keys)
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
87
        for parents_provider in self._parent_providers:
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
88
            new_found = parents_provider.get_parent_map(remaining)
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
89
            found.update(new_found)
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
90
            remaining.difference_update(new_found)
91
            if not remaining:
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
92
                break
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
93
        return found
94
95
96
class CachingParentsProvider(object):
    """A parents provider which will cache the revision => parents in a dict.

    This is useful for providers that have an expensive lookup.
    """

    def __init__(self, parent_provider):
        self._real_provider = parent_provider
        # Theoretically we could use an LRUCache here
        self._cache = {}

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._real_provider)

    def get_parent_map(self, keys):
        """See _StackedParentsProvider.get_parent_map"""
        # A cached value of None records that the real provider has already
        # been asked about the key and had no answer; such keys are never
        # queried again and never appear in the result.
        cache = self._cache
        parent_map = {}
        uncached = set()
        for key in keys:
            try:
                cached = cache[key]
            except KeyError:
                uncached.add(key)
            else:
                if cached is not None:
                    parent_map[key] = cached
        if uncached:
            fresh = self._real_provider.get_parent_map(uncached)
            cache.update(fresh)
            parent_map.update(fresh)
            # Remember the misses too, so we never re-query them.
            for key in uncached.difference(fresh):
                cache[key] = None
        return parent_map
133
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
134
2490.2.22 by Aaron Bentley
Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher
135
class Graph(object):
2490.2.10 by Aaron Bentley
Clarify text, remove unused _get_ancestry method
136
    """Provide incremental access to revision graphs.
137
138
    This is the generic implementation; it is intended to be subclassed to
139
    specialize it for other repository types.
140
    """
2490.2.1 by Aaron Bentley
Start work on GraphWalker
141
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
142
    def __init__(self, parents_provider):
        """Construct a Graph that uses several graphs as its input

        This should not normally be invoked directly, because there may be
        specialized implementations for particular repository types.  See
        Repository.get_graph().

        :param parents_provider: An object providing a get_parent_map call
            conforming to the behavior of
            StackedParentsProvider.get_parent_map.
        """
        # Adopt whichever lookup methods the provider supplies; the
        # instance attributes shadow the class-level fallback methods.
        for name in ('get_parents', 'get_parent_map'):
            supplied = getattr(parents_provider, name, None)
            if supplied is not None:
                setattr(self, name, supplied)
        self._parents_provider = parents_provider
2490.2.28 by Aaron Bentley
Fix handling of null revision
158
159
    def __repr__(self):
        """Show the wrapped parents provider for debugging."""
        return 'Graph(%r)' % (self._parents_provider,)
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
161
162
    def find_lca(self, *revisions):
        """Determine the lowest common ancestors of the provided revisions

        A lowest common ancestor is a common ancestor none of whose
        descendants are common ancestors.  In graphs, unlike trees, there may
        be multiple lowest common ancestors.

        This algorithm has two phases.  Phase 1 identifies border ancestors,
        and phase 2 filters border ancestors to determine lowest common
        ancestors.

        In phase 1, border ancestors are identified, using a breadth-first
        search starting at the bottom of the graph.  Searches are stopped
        whenever a node or one of its descendants is determined to be common

        In phase 2, the border ancestors are filtered to find the least
        common ancestors.  This is done by searching the ancestries of each
        border ancestor.

        Phase 2 is perfomed on the principle that a border ancestor that is
        not an ancestor of any other border ancestor is a least common
        ancestor.

        Searches are stopped when they find a node that is determined to be a
        common ancestor of all border ancestors, because this shows that it
        cannot be a descendant of any border ancestor.

        The scaling of this operation should be proportional to
        1. The number of uncommon ancestors
        2. The number of border ancestors
        3. The length of the shortest path between a border ancestor and an
           ancestor of all border ancestors.
        """
        # Phase 1: collect the border ancestors of all inputs.
        border, _common, _searchers = self._find_border_ancestors(revisions)
        # Phase 2: border ancestors reachable from other border ancestors
        # are not "lowest"; heads() strips them out.
        return self.heads(border)
2490.2.9 by Aaron Bentley
Fix minimal common ancestor algorithm for non-minimal perhipheral ancestors
200
2490.2.23 by Aaron Bentley
Adapt find_borders to produce a graph difference
201
    def find_difference(self, left_revision, right_revision):
        """Determine the graph difference between two revisions"""
        # Walk both ancestries until the common region is fully mapped out.
        border, common, searchers = self._find_border_ancestors(
            [left_revision, right_revision])
        self._search_for_extra_common(common, searchers)
        left_seen = searchers[0].seen
        right_seen = searchers[1].seen
        # Each side of the result is what only that searcher reached.
        return (left_seen - right_seen, right_seen - left_seen)
2490.2.23 by Aaron Bentley
Adapt find_borders to produce a graph difference
209
3377.3.21 by John Arbash Meinel
Simple brute-force implementation of find_unique_ancestors
210
    def find_unique_ancestors(self, unique_revision, common_revisions):
        """Find the unique ancestors for a revision versus others.

        This returns the ancestry of unique_revision, excluding all revisions
        in the ancestry of common_revisions. If unique_revision is in the
        ancestry, then the empty set will be returned.

        :param unique_revision: The revision_id whose ancestry we are
            interested in.
        :param common_revisions: Revision_ids of ancestries to exclude.
        :return: A set of revisions in the ancestry of unique_revision
        """
        common_revisions = set(common_revisions)
        # Simple brute force implementation: walk both full ancestries and
        # subtract.  Ugly, but gets the tests working first.
        if unique_revision in common_revisions:
            return set()
        unique_side = set(
            item[0] for item in self.iter_ancestry([unique_revision]))
        common_side = set(
            item[0] for item in self.iter_ancestry(common_revisions))
        return unique_side - common_side
230
3172.1.2 by Robert Collins
Parent Providers should now implement ``get_parent_map`` returning a
231
    @symbol_versioning.deprecated_method(symbol_versioning.one_one)
    def get_parents(self, revisions):
        """Find revision ids of the parents of a list of revisions

        A list is returned of the same length as the input.  Each entry
        is a list of parent ids for the corresponding input revision.

        [NULL_REVISION] is used as the parent of the first user-committed
        revision.  Its parent list is empty.

        If the revision is not present (i.e. a ghost), None is used in place
        of the list of parents.

        Deprecated in bzr 1.2 - please see get_parent_map.
        """
        # Delegate to the map-based API; ghosts come back as None.
        found = self.get_parent_map(revisions)
        return [found.get(r, None) for r in revisions]
3172.1.2 by Robert Collins
Parent Providers should now implement ``get_parent_map`` returning a
248
249
    def get_parent_map(self, revisions):
        """Get a map of key:parent_list for revisions.

        This implementation delegates to get_parents, for old parent_providers
        that do not supply get_parent_map.

        :param revisions: An iterable of revision ids.
        :return: A dict of {revision: parents}; revisions whose parents are
            unknown (ghosts, returned as None by get_parents) are omitted.
        """
        result = {}
        # get_parents returns a flat list of parent lists parallel to the
        # input, so pair each entry back up with its revision id.  (The
        # previous code iterated get_parents() directly and tried to unpack
        # each parent list as a (rev, parents) pair, which cannot work.)
        for rev, parents in zip(revisions, self.get_parents(revisions)):
            if parents is not None:
                result[rev] = parents
        return result
260
2490.2.22 by Aaron Bentley
Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher
261
    def _make_breadth_first_searcher(self, revisions):
        """Return a _BreadthFirstSearcher over this graph, seeded with
        revisions."""
        return _BreadthFirstSearcher(revisions, self)
263
2490.2.10 by Aaron Bentley
Clarify text, remove unused _get_ancestry method
264
    def _find_border_ancestors(self, revisions):
        """Find common ancestors with at least one uncommon descendant.

        Border ancestors are identified using a breadth-first
        search starting at the bottom of the graph.  Searches are stopped
        whenever a node or one of its descendants is determined to be common.

        This will scale with the number of uncommon ancestors.

        As well as the border ancestors, a set of seen common ancestors and a
        list of sets of seen ancestors for each input revision is returned.
        This allows calculation of graph difference from the results of this
        operation.

        :param revisions: The revision ids to start each searcher from.
        :raises errors.InvalidRevisionId: if None is passed as a revision;
            NULL_REVISION must be used instead.
        :return: (border_ancestors, common_ancestors, searchers) where
            searchers is one _BreadthFirstSearcher per input revision.
        """
        if None in revisions:
            raise errors.InvalidRevisionId(None, self)
        common_ancestors = set()
        # One breadth-first searcher per input revision; all advance in
        # lock-step, one generation per pass of the while loop.
        searchers = [self._make_breadth_first_searcher([r])
                     for r in revisions]
        # NOTE(review): active_searchers is never read after this point —
        # looks like dead code left from an earlier revision; confirm.
        active_searchers = searchers[:]
        border_ancestors = set()

        while True:
            # Advance every searcher one step and pool what they found.
            newly_seen = set()
            for searcher in searchers:
                new_ancestors = searcher.step()
                if new_ancestors:
                    newly_seen.update(new_ancestors)
            new_common = set()
            for revision in newly_seen:
                if revision in common_ancestors:
                    # Not a border ancestor because it was seen as common
                    # already
                    new_common.add(revision)
                    continue
                # A revision seen by *every* searcher on the same step is a
                # first-time common node, i.e. a border ancestor.
                for searcher in searchers:
                    if revision not in searcher.seen:
                        break
                else:
                    # This is a border because it is a first common that we see
                    # after walking for a while.
                    border_ancestors.add(revision)
                    new_common.add(revision)
            if new_common:
                # Anything already reachable from a new common node is
                # common too; restart all searchers from the common set so
                # they converge instead of re-walking shared history.
                for searcher in searchers:
                    new_common.update(searcher.find_seen_ancestors(new_common))
                for searcher in searchers:
                    searcher.start_searching(new_common)
                common_ancestors.update(new_common)

            # Figure out what the searchers will be searching next, and if
            # there is only 1 set being searched, then we are done searching,
            # since all searchers would have to be searching the same data,
            # thus it *must* be in common.
            unique_search_sets = set()
            for searcher in searchers:
                will_search_set = frozenset(searcher._next_query)
                if will_search_set not in unique_search_sets:
                    # This searcher is searching a unique set of nodes, let it
                    unique_search_sets.add(will_search_set)

            if len(unique_search_sets) == 1:
                # Sanity check: convergence should only happen once every
                # frontier node has been marked common.
                nodes = unique_search_sets.pop()
                uncommon_nodes = nodes.difference(common_ancestors)
                assert not uncommon_nodes, ("Somehow we ended up converging"
                                            " without actually marking them as"
                                            " in common."
                                            "\nStart_nodes: %s"
                                            "\nuncommon_nodes: %s"
                                            % (revisions, uncommon_nodes))
                break
        return border_ancestors, common_ancestors, searchers
2490.2.9 by Aaron Bentley
Fix minimal common ancestor algorithm for non-minimal perhipheral ancestors
336
2776.3.1 by Robert Collins
* Deprecated method ``find_previous_heads`` on
337
    def heads(self, keys):
        """Return the heads from amongst keys.

        This is done by searching the ancestries of each key.  Any key that is
        reachable from another key is not returned; all the others are.

        This operation scales with the relative depth between any two keys. If
        any two keys are completely disconnected all ancestry of both sides
        will be retrieved.

        :param keys: An iterable of keys.
        :return: A set of the heads. Note that as a set there is no ordering
            information. Callers will need to filter their input to create
            order if they need it.
        """
        candidate_heads = set(keys)
        if revision.NULL_REVISION in candidate_heads:
            # NULL_REVISION is only a head if it is the only entry
            candidate_heads.remove(revision.NULL_REVISION)
            if not candidate_heads:
                return set([revision.NULL_REVISION])
        # Zero or one candidate: nothing can dominate anything else.
        if len(candidate_heads) < 2:
            return candidate_heads
        # One breadth-first searcher per candidate; a candidate is discarded
        # the moment any searcher reaches it (it is then known to be an
        # ancestor of another candidate).
        searchers = dict((c, self._make_breadth_first_searcher([c]))
                          for c in candidate_heads)
        active_searchers = dict(searchers)
        # skip over the actual candidate for each searcher
        for searcher in active_searchers.itervalues():
            searcher.next()
        # The common walker finds nodes that are common to two or more of the
        # input keys, so that we don't access all history when a currently
        # uncommon search point actually meets up with something behind a
        # common search point. Common search points do not keep searches
        # active; they just allow us to make searches inactive without
        # accessing all history.
        common_walker = self._make_breadth_first_searcher([])
        while len(active_searchers) > 0:
            ancestors = set()
            # advance searches
            try:
                common_walker.next()
            except StopIteration:
                # No common points being searched at this time.
                pass
            # Iterate over a snapshot of the keys because entries may be
            # deleted from active_searchers while we loop.
            for candidate in active_searchers.keys():
                try:
                    searcher = active_searchers[candidate]
                except KeyError:
                    # rare case: we deleted candidate in a previous iteration
                    # through this for loop, because it was determined to be
                    # a descendant of another candidate.
                    continue
                try:
                    ancestors.update(searcher.next())
                except StopIteration:
                    # This searcher has exhausted its ancestry.
                    del active_searchers[candidate]
                    continue
            # process found nodes
            new_common = set()
            for ancestor in ancestors:
                if ancestor in candidate_heads:
                    # Reached by some other searcher, so not a head.
                    candidate_heads.remove(ancestor)
                    del searchers[ancestor]
                    if ancestor in active_searchers:
                        del active_searchers[ancestor]
                # it may meet up with a known common node
                if ancestor in common_walker.seen:
                    # some searcher has encountered our known common nodes:
                    # just stop it
                    ancestor_set = set([ancestor])
                    for searcher in searchers.itervalues():
                        searcher.stop_searching_any(ancestor_set)
                else:
                    # or it may have been just reached by all the searchers:
                    for searcher in searchers.itervalues():
                        if ancestor not in searcher.seen:
                            break
                    else:
                        # The final active searcher has just reached this node,
                        # making it be known as a descendant of all candidates,
                        # so we can stop searching it, and any seen ancestors
                        new_common.add(ancestor)
                        for searcher in searchers.itervalues():
                            seen_ancestors =\
                                searcher.find_seen_ancestors([ancestor])
                            searcher.stop_searching_any(seen_ancestors)
            common_walker.start_searching(new_common)
        return candidate_heads
2490.2.13 by Aaron Bentley
Update distinct -> lowest, refactor, add ParentsProvider concept
425
1551.19.10 by Aaron Bentley
Merge now warns when it encounters a criss-cross
426
    def find_unique_lca(self, left_revision, right_revision,
                        count_steps=False):
        """Find a unique LCA.

        Find lowest common ancestors.  If there is no unique  common
        ancestor, find the lowest common ancestors of those ancestors.

        Iteration stops when a unique lowest common ancestor is found.
        The graph origin is necessarily a unique lowest common ancestor.

        Note that None is not an acceptable substitute for NULL_REVISION.
        in the input for this method.

        :param count_steps: If True, the return value will be a tuple of
            (unique_lca, steps) where steps is the number of times that
            find_lca was run.  If False, only unique_lca is returned.
        """
        candidates = [left_revision, right_revision]
        iterations = 0
        while True:
            # Keep taking LCAs of the previous LCA set until it collapses
            # to a single revision.
            iterations += 1
            lca = self.find_lca(*candidates)
            if len(lca) == 1:
                unique = lca.pop()
                if count_steps:
                    return unique, iterations
                return unique
            if len(lca) == 0:
                # Disconnected histories: no common ancestor at all.
                raise errors.NoCommonAncestor(left_revision, right_revision)
            candidates = lca
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
457
3228.4.4 by John Arbash Meinel
Change iter_ancestry to take a group instead of a single node,
458
    def iter_ancestry(self, revision_ids):
        """Iterate the ancestry of this revision.

        :param revision_ids: Nodes to start the search
        :return: Yield tuples mapping a revision_id to its parents for the
            ancestry of revision_id.
            Ghosts will be returned with None as their parents, and nodes
            with no parents will have NULL_REVISION as their only parent. (As
            defined by get_parent_map.)
            There will also be a node for (NULL_REVISION, ())
        """
        frontier = set(revision_ids)
        emitted = set()
        while frontier:
            # Mark the whole frontier before expanding it, so a node is
            # never queued twice.
            emitted.update(frontier)
            parent_map = self.get_parent_map(frontier)
            next_frontier = set()
            for entry in parent_map.iteritems():
                yield entry
                next_frontier.update(
                    p for p in entry[1] if p not in emitted)
            # Anything we asked about but got no answer for is a ghost.
            for ghost in frontier.difference(parent_map):
                yield (ghost, None)
            frontier = next_frontier
482
2490.2.31 by Aaron Bentley
Fix iter_topo_order to permit un-included parents
483
    def iter_topo_order(self, revisions):
        """Iterate through the input revisions in topological order.

        This sorting only ensures that parents come before their children.
        An ancestor may sort after a descendant if the relationship is not
        visible in the supplied list of revisions.
        """
        parent_graph = self.get_parent_map(revisions)
        return tsort.TopoSorter(parent_graph).iter_topo_order()
2490.2.30 by Aaron Bentley
Add functionality for tsorting graphs
492
2653.2.1 by Aaron Bentley
Implement Graph.is_ancestor
493
    def is_ancestor(self, candidate_ancestor, candidate_descendant):
        """Determine whether a revision is an ancestor of another.

        We answer this using heads() as heads() has the logic to perform the
        smallest number of parent lookups to determine the ancestral
        relationship between N revisions.
        """
        # If the ancestor really is an ancestor, it is dominated by the
        # descendant and the descendant is the sole surviving head.
        surviving_heads = self.heads(
            [candidate_ancestor, candidate_descendant])
        return surviving_heads == set([candidate_descendant])
2653.2.1 by Aaron Bentley
Implement Graph.is_ancestor
502
3377.3.1 by John Arbash Meinel
Bring in some of the changes from graph_update and graph_optimization
503
    def _search_for_extra_common(self, common, searchers):
        """Make sure that unique nodes are genuinely unique.

        After _find_border_ancestors, all nodes marked "common" are indeed
        common. Some of the nodes considered unique are not, due to history
        shortcuts stopping the searches early.

        We know that we have searched enough when all common search tips are
        descended from all unique (uncommon) nodes because we know that a node
        cannot be an ancestor of its own ancestor.

        :param common: A set of common nodes
        :param searchers: The searchers returned from _find_border_ancestors
        :return: None
        """
        # Basic algorithm...
        #   A) The passed in searchers should all be on the same tips, thus
        #      they should be considered the "common" searchers.
        #   B) We find the difference between the searchers, these are the
        #      "unique" nodes for each side.
        #   C) We do a quick culling so that we only start searching from the
        #      more interesting unique nodes. (A unique ancestor is more
        #      interesting than any of its children.)
        #   D) We start searching for ancestors common to all unique nodes.
        #   E) We have the common searchers stop searching any ancestors of
        #      nodes found by (D)
        #   F) When there are no more common search tips, we stop

        # TODO: We need a way to remove unique_searchers when they overlap with
        #       other unique searchers.
        # NOTE(review): assert is stripped under python -O; the algorithm below
        # genuinely only handles the two-searcher case.
        assert len(searchers) == 2, (
            "Algorithm not yet implemented for > 2 searchers")
        common_searchers = searchers
        left_searcher = searchers[0]
        right_searcher = searchers[1]
        # Nodes seen by exactly one side are the candidate "unique" nodes.
        unique = left_searcher.seen.symmetric_difference(right_searcher.seen)
        if not unique: # No unique nodes, nothing to do
            return
        # total_unique / simple_unique are kept only for the trace.mutter()
        # diagnostics further down.
        total_unique = len(unique)
        # Cull candidates that have a parent in the set: searching from the
        # parent subsumes searching from the child.
        unique = self._remove_simple_descendants(unique,
                    self.get_parent_map(unique))
        simple_unique = len(unique)

        unique_searchers = []
        for revision_id in unique:
            # Seed each unique searcher with everything the owning side has
            # already seen above this node, so convergence is detected early.
            if revision_id in left_searcher.seen:
                parent_searcher = left_searcher
            else:
                parent_searcher = right_searcher
            revs_to_search = parent_searcher.find_seen_ancestors([revision_id])
            if not revs_to_search: # XXX: This shouldn't be possible
                revs_to_search = [revision_id]
            searcher = self._make_breadth_first_searcher(revs_to_search)
            # We don't care about the starting nodes.
            searcher.step()
            unique_searchers.append(searcher)

        # possible todo: aggregate the common searchers into a single common
        #   searcher, just make sure that we include the nodes into the .seen
        #   properties of the original searchers

        # ancestor_all_unique: nodes seen by *every* unique searcher; common
        # searchers may stop once they reach these.
        ancestor_all_unique = None
        for searcher in unique_searchers:
            if ancestor_all_unique is None:
                ancestor_all_unique = set(searcher.seen)
            else:
                ancestor_all_unique = ancestor_all_unique.intersection(
                                            searcher.seen)

        # Filter out searchers that don't actually search different nodes. We
        # already have the ancestry intersection for them
        next_unique_searchers = []
        unique_search_sets = set()
        for searcher in unique_searchers:
            will_search_set = frozenset(searcher._next_query)
            if will_search_set not in unique_search_sets:
                # This searcher is searching a unique set of nodes, let it
                unique_search_sets.add(will_search_set)
                next_unique_searchers.append(searcher)
        trace.mutter('Started %s unique searchers for %s unique revisions,'
                     ' %s with unique tips',
                     simple_unique, total_unique, len(next_unique_searchers))
        unique_searchers = next_unique_searchers

        while True: # If we have no more nodes we have nothing to do
            # Advance every searcher one breadth-first layer.
            newly_seen_common = set()
            for searcher in common_searchers:
                newly_seen_common.update(searcher.step())
            newly_seen_unique = set()
            for searcher in unique_searchers:
                newly_seen_unique.update(searcher.step())
            new_common_unique = set()
            for revision in newly_seen_unique:
                for searcher in unique_searchers:
                    if revision not in searcher.seen:
                        break
                else:
                    # This is a border because it is a first common that we see
                    # after walking for a while.
                    new_common_unique.add(revision)
            if newly_seen_common:
                # These are nodes descended from one of the 'common' searchers.
                # Make sure all searchers are on the same page
                for searcher in common_searchers:
                    newly_seen_common.update(
                        searcher.find_seen_ancestors(newly_seen_common))
                # We start searching the whole ancestry. It is a bit wasteful,
                # though. We really just want to mark all of these nodes as
                # 'seen' and then start just the tips. However, it requires a
                # get_parent_map() call to figure out the tips anyway, and all
                # redundant requests should be fairly fast.
                for searcher in common_searchers:
                    searcher.start_searching(newly_seen_common)

                # If a 'common' node is an ancestor of all unique searchers, we
                # can stop searching it.
                stop_searching_common = ancestor_all_unique.intersection(
                                            newly_seen_common)
                if stop_searching_common:
                    for searcher in common_searchers:
                        searcher.stop_searching_any(stop_searching_common)
            if new_common_unique:
                # We found some ancestors that are common
                for searcher in unique_searchers:
                    new_common_unique.update(
                        searcher.find_seen_ancestors(new_common_unique))
                # Since these are common, we can grab another set of ancestors
                # that we have seen
                for searcher in common_searchers:
                    new_common_unique.update(
                        searcher.find_seen_ancestors(new_common_unique))

                # We can tell all of the unique searchers to start at these
                # nodes, and tell all of the common searchers to *stop*
                # searching these nodes
                for searcher in unique_searchers:
                    searcher.start_searching(new_common_unique)
                for searcher in common_searchers:
                    searcher.stop_searching_any(new_common_unique)
                ancestor_all_unique.update(new_common_unique)

                # Filter out searchers that don't actually search different
                # nodes. We already have the ancestry intersection for them
                next_unique_searchers = []
                unique_search_sets = set()
                for searcher in unique_searchers:
                    will_search_set = frozenset(searcher._next_query)
                    if will_search_set not in unique_search_sets:
                        # This searcher is searching a unique set of nodes, let it
                        unique_search_sets.add(will_search_set)
                        next_unique_searchers.append(searcher)
                unique_searchers = next_unique_searchers
            for searcher in common_searchers:
                if searcher._next_query:
                    break
            else:
                # All common searcher have stopped searching
                return
3377.3.1 by John Arbash Meinel
Bring in some of the changes from graph_update and graph_optimization
661
662
    def _remove_simple_descendants(self, revisions, parent_map):
663
        """remove revisions which are children of other ones in the set
664
665
        This doesn't do any graph searching, it just checks the immediate
666
        parent_map to find if there are any children which can be removed.
667
668
        :param revisions: A set of revision_ids
669
        :return: A set of revision_ids with the children removed
670
        """
671
        simple_ancestors = revisions.copy()
672
        # TODO: jam 20071214 we *could* restrict it to searching only the
673
        #       parent_map of revisions already present in 'revisions', but
674
        #       considering the general use case, I think this is actually
675
        #       better.
676
677
        # This is the same as the following loop. I don't know that it is any
678
        # faster.
679
        ## simple_ancestors.difference_update(r for r, p_ids in parent_map.iteritems()
680
        ##     if p_ids is not None and revisions.intersection(p_ids))
681
        ## return simple_ancestors
682
683
        # Yet Another Way, invert the parent map (which can be cached)
684
        ## descendants = {}
685
        ## for revision_id, parent_ids in parent_map.iteritems():
686
        ##   for p_id in parent_ids:
687
        ##       descendants.setdefault(p_id, []).append(revision_id)
688
        ## for revision in revisions.intersection(descendants):
689
        ##   simple_ancestors.difference_update(descendants[revision])
690
        ## return simple_ancestors
691
        for revision, parent_ids in parent_map.iteritems():
692
            if parent_ids is None:
693
                continue
694
            for parent_id in parent_ids:
695
                if parent_id in revisions:
696
                    # This node has a parent present in the set, so we can
697
                    # remove it
698
                    simple_ancestors.discard(revision)
699
                    break
700
        return simple_ancestors
701
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
702
2911.4.1 by Robert Collins
Factor out the Graph.heads() cache from _RevisionTextVersionCache for reuse, and use it in commit.
703
class HeadsCache(object):
    """A cache of results for graph heads calls."""

    def __init__(self, graph):
        self.graph = graph
        self._heads = {}

    def heads(self, keys):
        """Return the heads of keys.

        This matches the API of Graph.heads(), specifically the return value is
        a set which can be mutated, and ordering of the input is not preserved
        in the output.

        :see also: Graph.heads.
        :param keys: The keys to calculate heads for.
        :return: A set containing the heads, which may be mutated without
            affecting future lookups.
        """
        # frozenset makes the keys hashable and order-insensitive.
        cache_key = frozenset(keys)
        if cache_key in self._heads:
            # Hand out a copy so callers cannot corrupt the cached value.
            return set(self._heads[cache_key])
        computed = self.graph.heads(cache_key)
        self._heads[cache_key] = computed
        return set(computed)
729
730
3224.1.20 by John Arbash Meinel
Reduce the number of cache misses by caching known heads answers
731
class FrozenHeadsCache(object):
    """Cache heads() calls, assuming the caller won't modify them."""

    def __init__(self, graph):
        self.graph = graph
        self._heads = {}

    def heads(self, keys):
        """Return the heads of keys.

        Similar to Graph.heads(). The main difference is that the return value
        is a frozen set which cannot be mutated.

        :see also: Graph.heads.
        :param keys: The keys to calculate heads for.
        :return: A frozenset containing the heads.
        """
        cache_key = frozenset(keys)
        if cache_key not in self._heads:
            # Freeze the answer so the same object can be handed out safely
            # on every subsequent call.
            self._heads[cache_key] = frozenset(self.graph.heads(cache_key))
        return self._heads[cache_key]

    def cache(self, keys, heads):
        """Store a known value."""
        self._heads[frozenset(keys)] = frozenset(heads)
759
760
2490.2.22 by Aaron Bentley
Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher
761
class _BreadthFirstSearcher(object):
2921.3.4 by Robert Collins
Review feedback.
762
    """Parallel search breadth-first the ancestry of revisions.
2490.2.10 by Aaron Bentley
Clarify text, remove unused _get_ancestry method
763
764
    This class implements the iterator protocol, but additionally
765
    1. provides a set of seen ancestors, and
766
    2. allows some ancestries to be unsearched, via stop_searching_any
767
    """
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
768
2490.2.22 by Aaron Bentley
Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher
769
    def __init__(self, revisions, parents_provider):
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
770
        self._iterations = 0
771
        self._next_query = set(revisions)
772
        self.seen = set()
3184.1.1 by Robert Collins
Add basic get_recipe to the graph breadth first searcher.
773
        self._started_keys = set(self._next_query)
774
        self._stopped_keys = set()
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
775
        self._parents_provider = parents_provider
3177.3.3 by Robert Collins
Review feedback.
776
        self._returning = 'next_with_ghosts'
3184.1.2 by Robert Collins
Add tests for starting and stopping searches in combination with get_recipe.
777
        self._current_present = set()
778
        self._current_ghosts = set()
779
        self._current_parents = {}
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
780
781
    def __repr__(self):
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
782
        if self._iterations:
783
            prefix = "searching"
3099.3.1 by John Arbash Meinel
Implement get_parent_map for ParentProviders
784
        else:
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
785
            prefix = "starting"
786
        search = '%s=%r' % (prefix, list(self._next_query))
787
        return ('_BreadthFirstSearcher(iterations=%d, %s,'
788
                ' seen=%r)' % (self._iterations, search, list(self.seen)))
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
789
3184.1.6 by Robert Collins
Create a SearchResult object which can be used as a replacement for sets.
790
    def get_result(self):
        """Get a SearchResult for the current state of this searcher.

        :return: A SearchResult for this search so far. The SearchResult is
            static - the search can be advanced and the search result will not
            be invalidated or altered.
        """
        if self._returning == 'next':
            # In 'next' mode the tips have been returned (and marked seen)
            # but their children are unknown, so run the query now to learn
            # the exclude keys.  This method is typically called only once
            # per search - when memoising the results - so the extra lookup
            # is acceptable.
            found, ghosts, next_revs, parents = self._do_query(self._next_query)
            # Undo the seen-set side effect of that query: those parents have
            # not actually been returned yet.
            self.seen.difference_update(next_revs)
            next_query = next_revs.union(ghosts)
        else:
            next_query = self._next_query
        excludes = self._stopped_keys.union(next_query)
        included_keys = self.seen.difference(excludes)
        return SearchResult(self._started_keys, excludes, len(included_keys),
            included_keys)
3184.1.1 by Robert Collins
Add basic get_recipe to the graph breadth first searcher.
814
3377.3.1 by John Arbash Meinel
Bring in some of the changes from graph_update and graph_optimization
815
    def step(self):
816
        try:
817
            return self.next()
818
        except StopIteration:
819
            return ()
820
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
821
    def next(self):
2490.2.10 by Aaron Bentley
Clarify text, remove unused _get_ancestry method
822
        """Return the next ancestors of this revision.
823
2490.2.12 by Aaron Bentley
Improve documentation
824
        Ancestors are returned in the order they are seen in a breadth-first
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
825
        traversal.  No ancestor will be returned more than once. Ancestors are
826
        returned before their parentage is queried, so ghosts and missing
827
        revisions (including the start revisions) are included in the result.
828
        This can save a round trip in LCA style calculation by allowing
829
        convergence to be detected without reading the data for the revision
830
        the convergence occurs on.
831
832
        :return: A set of revision_ids.
2490.2.10 by Aaron Bentley
Clarify text, remove unused _get_ancestry method
833
        """
3177.3.3 by Robert Collins
Review feedback.
834
        if self._returning != 'next':
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
835
            # switch to returning the query, not the results.
3177.3.3 by Robert Collins
Review feedback.
836
            self._returning = 'next'
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
837
            self._iterations += 1
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
838
        else:
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
839
            self._advance()
840
        if len(self._next_query) == 0:
841
            raise StopIteration()
3184.1.1 by Robert Collins
Add basic get_recipe to the graph breadth first searcher.
842
        # We have seen what we're querying at this point as we are returning
843
        # the query, not the results.
844
        self.seen.update(self._next_query)
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
845
        return self._next_query
846
847
    def next_with_ghosts(self):
848
        """Return the next found ancestors, with ghosts split out.
849
        
850
        Ancestors are returned in the order they are seen in a breadth-first
851
        traversal.  No ancestor will be returned more than once. Ancestors are
3177.3.3 by Robert Collins
Review feedback.
852
        returned only after asking for their parents, which allows us to detect
853
        which revisions are ghosts and which are not.
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
854
855
        :return: A tuple with (present ancestors, ghost ancestors) sets.
856
        """
3177.3.3 by Robert Collins
Review feedback.
857
        if self._returning != 'next_with_ghosts':
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
858
            # switch to returning the results, not the current query.
3177.3.3 by Robert Collins
Review feedback.
859
            self._returning = 'next_with_ghosts'
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
860
            self._advance()
861
        if len(self._next_query) == 0:
862
            raise StopIteration()
863
        self._advance()
864
        return self._current_present, self._current_ghosts
865
866
    def _advance(self):
867
        """Advance the search.
868
869
        Updates self.seen, self._next_query, self._current_present,
3177.3.3 by Robert Collins
Review feedback.
870
        self._current_ghosts, self._current_parents and self._iterations.
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
871
        """
872
        self._iterations += 1
3177.3.2 by Robert Collins
Update graph searchers stop_searching_any and start_searching for next_with_ghosts.
873
        found, ghosts, next, parents = self._do_query(self._next_query)
874
        self._current_present = found
875
        self._current_ghosts = ghosts
876
        self._next_query = next
877
        self._current_parents = parents
3184.1.3 by Robert Collins
Automatically exclude ghosts.
878
        # ghosts are implicit stop points, otherwise the search cannot be
879
        # repeated when ghosts are filled.
880
        self._stopped_keys.update(ghosts)
3177.3.2 by Robert Collins
Update graph searchers stop_searching_any and start_searching for next_with_ghosts.
881
882
    def _do_query(self, revisions):
883
        """Query for revisions.
884
3184.1.4 by Robert Collins
Correctly exclude ghosts when ghosts are started on an existing search.
885
        Adds revisions to the seen set.
886
3177.3.2 by Robert Collins
Update graph searchers stop_searching_any and start_searching for next_with_ghosts.
887
        :param revisions: Revisions to query.
888
        :return: A tuple: (set(found_revisions), set(ghost_revisions),
889
           set(parents_of_found_revisions), dict(found_revisions:parents)).
890
        """
3377.3.9 by John Arbash Meinel
Small tweaks to _do_query
891
        found_revisions = set()
3177.3.2 by Robert Collins
Update graph searchers stop_searching_any and start_searching for next_with_ghosts.
892
        parents_of_found = set()
3184.1.1 by Robert Collins
Add basic get_recipe to the graph breadth first searcher.
893
        # revisions may contain nodes that point to other nodes in revisions:
894
        # we want to filter them out.
895
        self.seen.update(revisions)
3177.3.2 by Robert Collins
Update graph searchers stop_searching_any and start_searching for next_with_ghosts.
896
        parent_map = self._parents_provider.get_parent_map(revisions)
3377.3.9 by John Arbash Meinel
Small tweaks to _do_query
897
        found_revisions.update(parent_map)
3177.3.1 by Robert Collins
* New method ``next_with_ghosts`` on the Graph breadth-first-search objects
898
        for rev_id, parents in parent_map.iteritems():
3377.3.9 by John Arbash Meinel
Small tweaks to _do_query
899
            new_found_parents = [p for p in parents if p not in self.seen]
900
            if new_found_parents:
901
                # Calling set.update() with an empty generator is actually
902
                # rather expensive.
903
                parents_of_found.update(new_found_parents)
904
        ghost_revisions = revisions - found_revisions
905
        return found_revisions, ghost_revisions, parents_of_found, parent_map
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
906
2490.2.8 by Aaron Bentley
fix iteration stuff
907
    def __iter__(self):
        # Iterator protocol: the searcher is its own iterator, so a
        # for-loop over it advances the search generation by generation.
        return self
2490.2.7 by Aaron Bentley
Start implementing mca that scales with number of uncommon ancestors
909
3377.3.1 by John Arbash Meinel
Bring in some of the changes from graph_update and graph_optimization
910
    def find_seen_ancestors(self, revisions):
        """Find ancestors of these revisions that have already been seen."""
        seen = self.seen
        if self._returning == 'next':
            # self.seen contains what nodes have been returned, not what
            # nodes have been queried. We don't want to probe for nodes
            # that haven't been searched yet.
            unsearched = self._next_query
        else:
            unsearched = ()
        frontier = set(revisions).intersection(seen)
        ancestors = set(frontier)
        frontier.difference_update(unsearched)
        lookup = self._parents_provider.get_parent_map
        while frontier:
            parent_map = lookup(frontier)
            # Ghosts can be ignored entirely: a ghost can never have been
            # seen, so it cannot contribute ancestors.
            candidates = []
            for parent_ids in parent_map.itervalues():
                candidates.extend(parent_ids)
            newly_found = seen.intersection(candidates).difference(ancestors)
            ancestors.update(newly_found)
            newly_found.difference_update(unsearched)
            frontier = newly_found

        return ancestors
938
2490.2.10 by Aaron Bentley
Clarify text, remove unused _get_ancestry method
939
    def stop_searching_any(self, revisions):
        """
        Remove any of the specified revisions from the search list.

        None of the specified revisions are required to be present in the
        search list.  In this case, the call is a no-op.

        :param revisions: Revision ids the search should not walk beyond.
        :return: The subset of revisions that the search was actually
            visiting and that are now stopped.
        """
        revisions = frozenset(revisions)
        if self._returning == 'next':
            # Plain next() mode: the frontier in _next_query has not been
            # queried yet, so dropping the ids from it is sufficient.
            stopped = self._next_query.intersection(revisions)
            self._next_query = self._next_query.difference(revisions)
        else:
            # next_with_ghosts mode: the current generation is split into
            # present and ghost ids; both kinds can be stopped.
            stopped_present = self._current_present.intersection(revisions)
            stopped = stopped_present.union(
                self._current_ghosts.intersection(revisions))
            self._current_present.difference_update(stopped)
            self._current_ghosts.difference_update(stopped)
            # stopping 'x' should stop returning parents of 'x', but
            # not if 'y' always references those same parents
            stop_rev_references = {}
            # Count how many stopped revisions reference each parent.
            for rev in stopped_present:
                for parent_id in self._current_parents[rev]:
                    if parent_id not in stop_rev_references:
                        stop_rev_references[parent_id] = 0
                    stop_rev_references[parent_id] += 1
            # if only the stopped revisions reference it, the ref count will be
            # 0 after this loop.  Note that _current_parents still includes
            # the stopped revisions themselves, so their own references are
            # subtracted back out here.
            for parents in self._current_parents.itervalues():
                for parent_id in parents:
                    try:
                        stop_rev_references[parent_id] -= 1
                    except KeyError:
                        # parent_id is not referenced by any stopped
                        # revision, so there is no count to adjust.
                        pass
            stop_parents = set()
            for rev_id, refs in stop_rev_references.iteritems():
                if refs == 0:
                    stop_parents.add(rev_id)
            self._next_query.difference_update(stop_parents)
        self._stopped_keys.update(stopped)
        return stopped
2490.2.17 by Aaron Bentley
Add start_searching, tweak stop_searching_any
979
980
    def start_searching(self, revisions):
        """Add revisions to the search.

        The parents of revisions will be returned from the next call to next()
        or next_with_ghosts(). If next_with_ghosts was the most recently used
        next* call then the return value is the result of looking up the
        ghost/not ghost status of revisions. (A tuple (present, ghosted)).
        """
        revisions = frozenset(revisions)
        self._started_keys.update(revisions)
        # Compute the genuinely-new ids *before* _do_query marks everything
        # in revisions as seen.
        new_revisions = revisions.difference(self.seen)
        revs, ghosts, query, parents = self._do_query(revisions)
        # Ghosts are implicit stop points; recording them keeps the search
        # repeatable even if the ghosts are later filled in.
        self._stopped_keys.update(ghosts)
        if self._returning == 'next':
            # Plain next() mode only queues the previously-unseen ids for
            # the next query.
            self._next_query.update(new_revisions)
        else:
            # perform a query on revisions
            self._current_present.update(revs)
            self._current_ghosts.update(ghosts)
            self._next_query.update(query)
            self._current_parents.update(parents)
            return revs, ghosts
3184.1.6 by Robert Collins
Create a SearchResult object which can be used as a replacement for sets.
1002
1003
1004
class SearchResult(object):
    """The outcome of a breadth first search over a revision graph.

    A SearchResult exposes both the set of keys the search found and a
    compact recipe from which the same search can be replayed later.
    """

    def __init__(self, start_keys, exclude_keys, key_count, keys):
        """Create a SearchResult.

        :param start_keys: The keys the search started at.
        :param exclude_keys: The keys the search excludes.
        :param key_count: The total number of keys (from start to but not
            including exclude).
        :param keys: The keys the search found. Note that in future we may get
            a SearchResult from a smart server, in which case the keys list is
            not necessarily immediately available.
        """
        self._keys = frozenset(keys)
        self._recipe = (start_keys, exclude_keys, key_count)

    def get_recipe(self):
        """Return a recipe that can be used to replay this search.

        The recipe allows reconstruction of the same results at a later date
        without knowing all the found keys. The essential elements are a list
        of keys to start and to stop at. In order to give reproducible
        results when ghosts are encountered by a search they are automatically
        added to the exclude list (or else ghost filling may alter the
        results).

        :return: A tuple (start_keys_set, exclude_keys_set, revision_count).
            To recreate the results of this search, create a breadth first
            searcher on the same graph starting at start_keys. Then call
            next() (or next_with_ghosts()) repeatedly, and on every result,
            call stop_searching_any on any keys from the exclude_keys set.
            The revision_count value acts as a trivial cross-check - the
            found revisions of the new search should have as many elements
            as revision_count. If it does not, then additional revisions
            have been ghosted since the search was executed the first time
            and the second time.
        """
        return self._recipe

    def get_keys(self):
        """Return the keys found in this search.

        :return: A set of keys.
        """
        return self._keys
1054