# Copyright (C) 2005, 2006, 2007, 2008, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import cStringIO
import re
import time

from bzrlib import (
    bzrdir,
    check,
    chk_map,
    debug,
    errors,
    fifo_cache,
    generate_ids,
    gpg,
    graph,
    inventory,
    lazy_regex,
    lockable_files,
    lockdir,
    lru_cache,
    osutils,
    revision as _mod_revision,
    symbol_versioning,
    tsort,
    ui,
    versionedfile,
    )
from bzrlib.bundle import serializer
from bzrlib.revisiontree import RevisionTree
from bzrlib.store.versioned import VersionedFileStore
from bzrlib.testament import Testament
""")
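# Everything named inside the string passed to lazy_import above is bound to a
# placeholder object and only really imported on first attribute access, which
# keeps the cost of importing this module low.  Illustrative sketch (same
# mechanism, shown with a single module only):
#
#   lazy_import(globals(), "import re")
#   # 're' is a placeholder here; the real module loads on first use:
#   # re.compile('x')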

from bzrlib.decorators import needs_read_lock, needs_write_lock
from bzrlib.inter import InterObject
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )
from bzrlib import registry
from bzrlib.trace import (
    log_exception_quietly, note, mutter, mutter_callsite, warning)


# Old formats display a warning, but only once
_deprecation_warning_done = False


class CommitBuilder(object):
    """Provides an interface to build up a commit.

    This allows describing a tree to be committed without needing to
    know the internals of the format of the repository.
    """

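    # A rough usage sketch (not part of the original code; `repo`, `branch`,
    # `config`, `work_tree` and the entry iteration are assumptions about the
    # calling code, which normally obtains a builder via
    # Repository.get_commit_builder()):
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   builder = repo.get_commit_builder(branch, parents, config)
    #   for path, ie in entries_to_commit:   # hypothetical iterable
    #       builder.record_entry_contents(ie, parent_invs, path, work_tree,
    #           work_tree.path_content_summary(path))
    #   builder.finish_inventory()
    #   rev_id = builder.commit('message')   # commit() also ends the write group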
    # all clients should supply tree roots.
    record_root_entry = True
    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None):
        """Initiate a CommitBuilder.

        :param repository: Repository to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config: Configuration to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        """
        self._config = config

        if committer is None:
            self._committer = self._config.username()
        else:
            self._committer = committer

        self.new_inventory = Inventory(None)
        self._new_revision_id = revision_id
        self.parents = parents
        self.repository = repository

        self._revprops = {}
        if revprops is not None:
            self._validate_revprops(revprops)
            self._revprops.update(revprops)

        if timestamp is None:
            timestamp = time.time()
        # Restrict resolution to 1ms
        self._timestamp = round(timestamp, 3)

        if timezone is None:
            self._timezone = osutils.local_time_offset()
        else:
            self._timezone = int(timezone)

        self._generate_revision_if_needed()
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        self._basis_delta = []
        # API compatibility, older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False
        # memo'd check for no-op commits.
        self._any_changes = False

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _validate_unicode_text(self, text, context):
        """Verify things like commit messages don't have bogus characters."""
        if '\r' in text:
            raise ValueError('Invalid value for %s: %r' % (context, text))

    def _validate_revprops(self, revprops):
        for key, value in revprops.iteritems():
            # We know that the XML serializers do not round trip '\r'
            # correctly, so refuse to accept them
            if not isinstance(value, basestring):
                raise ValueError('revision property (%s) is not a valid'
                                 ' (unicode) string: %r' % (key, value))
            self._validate_unicode_text(value,
                                        'revision property (%s)' % (key,))

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
            timestamp=self._timestamp,
            timezone=self._timezone,
            committer=self._committer,
            message=message,
            inventory_sha1=self.inv_sha1,
            revision_id=self._new_revision_id,
            properties=self._revprops)
        rev.parent_ids = self.parents
        self.repository.add_revision(self._new_revision_id, rev,
            self.new_inventory, self._config)
        self.repository.commit_write_group()
        return self._new_revision_id

    def abort(self):
        """Abort the commit that is being built.
        """
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a RevisionTree
        representing the newly committed tree. This is preferred to
        calling Repository.revision_tree() because that may require
        deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return RevisionTree(self.repository, self.new_inventory,
                            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            self.inv_sha1 = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents
                )
        return self._new_revision_id

    def _gen_revision_id(self):
        """Return new revision-id."""
        return generate_ids.gen_revision_id(self._config.username(),
                                            self._timestamp)

    def _generate_revision_if_needed(self):
        """Create a revision id if None was supplied.

        If the repository cannot support user-specified revision ids
        they should override this function and raise CannotSetRevisionId
        if _new_revision_id is not None.

        :raises: CannotSetRevisionId
        """
        if self._new_revision_id is None:
            self._new_revision_id = self._gen_revision_id()
            self.random_revid = True
        else:
            self.random_revid = False

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this revision format, root entries have no knit or weave. When
        # serializing out to disk and back in, root.revision is always
        # _new_revision_id
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # NB: if there are no parents then this method is not called, so no
        # need to guard on parents having length.
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if ie.file_id not in basis_inv:
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

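    # Minimal sketch of the delete-recording protocol (get_basis_delta() above,
    # record_delete() and will_record_deletes() below); `builder`,
    # `deleted_paths` and `path_to_id` are assumed names, not part of this
    # module:
    #
    #   builder.will_record_deletes()
    #   for path in deleted_paths:
    #       builder.record_delete(path, path_to_id[path])
    #   # ... record the remaining entries ...
    #   delta = builder.get_basis_delta()
    #   # delta is a list of (old_path, new_path, file_id, entry) tuples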
    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id

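    # For reference: the change_delta values produced by record_entry_contents()
    # below (and accumulated in self._basis_delta) use the inventory-delta shape
    #   (old_path, new_path, file_id, new_inventory_entry)
    # where old_path is None for an add and new_path/entry are None for a
    # delete, as built by _get_delta() and record_delete() above.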
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
343  | 
def record_entry_contents(self, ie, parent_invs, path, tree,  | 
344  | 
content_summary):  | 
|
345  | 
"""Record the content of ie from tree into the commit if needed.  | 
|
346  | 
||
347  | 
        Side effect: sets ie.revision when unchanged
 | 
|
348  | 
||
349  | 
        :param ie: An inventory entry present in the commit.
 | 
|
350  | 
        :param parent_invs: The inventories of the parent revisions of the
 | 
|
351  | 
            commit.
 | 
|
352  | 
        :param path: The path the entry is at in the tree.
 | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
353  | 
        :param tree: The tree which contains this entry and should be used to
 | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
354  | 
            obtain content.
 | 
355  | 
        :param content_summary: Summary data from the tree about the paths
 | 
|
356  | 
            content - stat, length, exec, sha/link target. This is only
 | 
|
357  | 
            accessed when the entry has a revision of None - that is when it is
 | 
|
358  | 
            a candidate to commit.
 | 
|
| 
3709.3.1
by Robert Collins
 First cut - make it work - at updating the tree stat cache during commit.  | 
359  | 
        :return: A tuple (change_delta, version_recorded, fs_hash).
 | 
360  | 
            change_delta is an inventory_delta change for this entry against
 | 
|
361  | 
            the basis tree of the commit, or None if no change occured against
 | 
|
362  | 
            the basis tree.
 | 
|
| 
2871.1.3
by Robert Collins
 * The CommitBuilder method ``record_entry_contents`` now returns summary  | 
363  | 
            version_recorded is True if a new version of the entry has been
 | 
364  | 
            recorded. For instance, committing a merge where a file was only
 | 
|
365  | 
            changed on the other side will return (delta, False).
 | 
|
| 
3709.3.3
by Robert Collins
 NEWS for the record_entry_contents change.  | 
366  | 
            fs_hash is either None, or the hash details for the path (currently
 | 
367  | 
            a tuple of the contents sha1 and the statvalue returned by
 | 
|
368  | 
            tree.get_file_with_stat()).
 | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
369  | 
        """
 | 
370  | 
if self.new_inventory.root is None:  | 
|
| 
2871.1.2
by Robert Collins
 * ``CommitBuilder.record_entry_contents`` now requires the root entry of a  | 
371  | 
if ie.parent_id is not None:  | 
372  | 
raise errors.RootMissing()  | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
373  | 
self._check_root(ie, parent_invs, tree)  | 
374  | 
if ie.revision is None:  | 
|
375  | 
kind = content_summary[0]  | 
|
376  | 
else:  | 
|
377  | 
            # ie is carried over from a prior commit
 | 
|
378  | 
kind = ie.kind  | 
|
379  | 
        # XXX: repository specific check for nested tree support goes here - if
 | 
|
380  | 
        # the repo doesn't want nested trees we skip it ?
 | 
|
381  | 
if (kind == 'tree-reference' and  | 
|
382  | 
not self.repository._format.supports_tree_reference):  | 
|
383  | 
            # mismatch between commit builder logic and repository:
 | 
|
384  | 
            # this needs the entry creation pushed down into the builder.
 | 
|
| 
2776.4.18
by Robert Collins
 Review feedback.  | 
385  | 
raise NotImplementedError('Missing repository subtree support.')  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
386  | 
self.new_inventory.add(ie)  | 
387  | 
||
| 
2871.1.3
by Robert Collins
 * The CommitBuilder method ``record_entry_contents`` now returns summary  | 
388  | 
        # TODO: slow, take it out of the inner loop.
 | 
389  | 
try:  | 
|
390  | 
basis_inv = parent_invs[0]  | 
|
391  | 
except IndexError:  | 
|
392  | 
basis_inv = Inventory(root_id=None)  | 
|
393  | 
||
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
394  | 
        # ie.revision is always None if the InventoryEntry is considered
 | 
| 
2776.4.13
by Robert Collins
 Merge bzr.dev.  | 
395  | 
        # for committing. We may record the previous parents revision if the
 | 
396  | 
        # content is actually unchanged against a sole head.
 | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
397  | 
if ie.revision is not None:  | 
| 
2903.2.5
by Martin Pool
 record_entry_contents should give back deltas for changed roots; clean it up a bit  | 
398  | 
if not self._versioned_root and path == '':  | 
| 
2871.1.3
by Robert Collins
 * The CommitBuilder method ``record_entry_contents`` now returns summary  | 
399  | 
                # repositories that do not version the root set the root's
 | 
| 
3775.2.2
by Robert Collins
 Teach CommitBuilder to accumulate inventory deltas.  | 
400  | 
                # revision to the new commit even when no change occurs (more
 | 
401  | 
                # specifically, they do not record a revision on the root; and
 | 
|
402  | 
                # the rev id is assigned to the root during deserialisation -
 | 
|
403  | 
                # this masks when a change may have occurred against the basis.
 | 
|
404  | 
                # To match this we always issue a delta, because the revision
 | 
|
405  | 
                # of the root will always be changing.
 | 
|
| 
2903.2.5
by Martin Pool
 record_entry_contents should give back deltas for changed roots; clean it up a bit  | 
406  | 
if ie.file_id in basis_inv:  | 
407  | 
delta = (basis_inv.id2path(ie.file_id), path,  | 
|
408  | 
ie.file_id, ie)  | 
|
409  | 
else:  | 
|
| 
2871.1.3
by Robert Collins
 * The CommitBuilder method ``record_entry_contents`` now returns summary  | 
410  | 
                    # add
 | 
411  | 
delta = (None, path, ie.file_id, ie)  | 
|
| 
3879.2.3
by John Arbash Meinel
 Hide the .basis_delta variable, and require callers to use .get_basis_delta()  | 
412  | 
self._basis_delta.append(delta)  | 
| 
3709.3.1
by Robert Collins
 First cut - make it work - at updating the tree stat cache during commit.  | 
413  | 
return delta, False, None  | 
| 
2903.2.5
by Martin Pool
 record_entry_contents should give back deltas for changed roots; clean it up a bit  | 
414  | 
else:  | 
415  | 
                # we don't need to commit this, because the caller already
 | 
|
416  | 
                # determined that an existing revision of this file is
 | 
|
| 
3619.1.1
by Robert Collins
 Tighten up the handling of carried-over inventory entries.  | 
417  | 
                # appropriate. If its not being considered for committing then
 | 
418  | 
                # it and all its parents to the root must be unaltered so
 | 
|
419  | 
                # no-change against the basis.
 | 
|
420  | 
if ie.revision == self._new_revision_id:  | 
|
421  | 
raise AssertionError("Impossible situation, a skipped "  | 
|
| 
3619.1.2
by Robert Collins
 Review feedback.  | 
422  | 
"inventory entry (%r) claims to be modified in this "  | 
423  | 
"commit (%r).", (ie, self._new_revision_id))  | 
|
| 
3709.3.1
by Robert Collins
 First cut - make it work - at updating the tree stat cache during commit.  | 
424  | 
return None, False, None  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
425  | 
        # XXX: Friction: parent_candidates should return a list not a dict
 | 
426  | 
        #      so that we don't have to walk the inventories again.
 | 
|
427  | 
parent_candiate_entries = ie.parent_candidates(parent_invs)  | 
|
| 
2979.2.5
by Robert Collins
 Make CommitBuilder.heads be _heads as its internal to CommitBuilder only.  | 
428  | 
head_set = self._heads(ie.file_id, parent_candiate_entries.keys())  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
429  | 
heads = []  | 
430  | 
for inv in parent_invs:  | 
|
431  | 
if ie.file_id in inv:  | 
|
432  | 
old_rev = inv[ie.file_id].revision  | 
|
433  | 
if old_rev in head_set:  | 
|
434  | 
heads.append(inv[ie.file_id].revision)  | 
|
435  | 
head_set.remove(inv[ie.file_id].revision)  | 
|
436  | 
||
437  | 
store = False  | 
|
438  | 
        # now we check to see if we need to write a new record to the
 | 
|
439  | 
        # file-graph.
 | 
|
440  | 
        # We write a new entry unless there is one head to the ancestors, and
 | 
|
441  | 
        # the kind-derived content is unchanged.
 | 
|
442  | 
||
443  | 
        # Cheapest check first: no ancestors, or more the one head in the
 | 
|
444  | 
        # ancestors, we write a new node.
 | 
|
445  | 
if len(heads) != 1:  | 
|
446  | 
store = True  | 
|
447  | 
if not store:  | 
|
448  | 
            # There is a single head, look it up for comparison
 | 
|
449  | 
parent_entry = parent_candiate_entries[heads[0]]  | 
|
450  | 
            # if the non-content specific data has changed, we'll be writing a
 | 
|
451  | 
            # node:
 | 
|
452  | 
if (parent_entry.parent_id != ie.parent_id or  | 
|
453  | 
parent_entry.name != ie.name):  | 
|
454  | 
store = True  | 
|
455  | 
        # now we need to do content specific checks:
 | 
|
456  | 
if not store:  | 
|
457  | 
            # if the kind changed the content obviously has
 | 
|
458  | 
if kind != parent_entry.kind:  | 
|
459  | 
store = True  | 
|
| 
3709.3.2
by Robert Collins
 Race-free stat-fingerprint updating during commit via a new method get_file_with_stat.  | 
460  | 
        # Stat cache fingerprint feedback for the caller - None as we usually
 | 
461  | 
        # don't generate one.
 | 
|
462  | 
fingerprint = None  | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
463  | 
if kind == 'file':  | 
| 
3376.2.4
by Martin Pool
 Remove every assert statement from bzrlib!  | 
464  | 
if content_summary[2] is None:  | 
465  | 
raise ValueError("Files must not have executable = None")  | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
466  | 
if not store:  | 
467  | 
if (# if the file length changed we have to store:  | 
|
468  | 
parent_entry.text_size != content_summary[1] or  | 
|
469  | 
                    # if the exec bit has changed we have to store:
 | 
|
470  | 
parent_entry.executable != content_summary[2]):  | 
|
471  | 
store = True  | 
|
472  | 
elif parent_entry.text_sha1 == content_summary[3]:  | 
|
473  | 
                    # all meta and content is unchanged (using a hash cache
 | 
|
474  | 
                    # hit to check the sha)
 | 
|
475  | 
ie.revision = parent_entry.revision  | 
|
476  | 
ie.text_size = parent_entry.text_size  | 
|
477  | 
ie.text_sha1 = parent_entry.text_sha1  | 
|
478  | 
ie.executable = parent_entry.executable  | 
|
| 
3709.3.1
by Robert Collins
 First cut - make it work - at updating the tree stat cache during commit.  | 
479  | 
return self._get_delta(ie, basis_inv, path), False, None  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
480  | 
else:  | 
481  | 
                    # Either there is only a hash change(no hash cache entry,
 | 
|
482  | 
                    # or same size content change), or there is no change on
 | 
|
483  | 
                    # this file at all.
 | 
|
| 
2776.4.19
by Robert Collins
 Final review tweaks.  | 
484  | 
                    # Provide the parent's hash to the store layer, so that the
 | 
485  | 
                    # content is unchanged we will not store a new node.
 | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
486  | 
nostore_sha = parent_entry.text_sha1  | 
487  | 
if store:  | 
|
| 
2776.4.18
by Robert Collins
 Review feedback.  | 
488  | 
                # We want to record a new node regardless of the presence or
 | 
489  | 
                # absence of a content change in the file.
 | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
490  | 
nostore_sha = None  | 
| 
2776.4.18
by Robert Collins
 Review feedback.  | 
491  | 
ie.executable = content_summary[2]  | 
| 
3709.3.2
by Robert Collins
 Race-free stat-fingerprint updating during commit via a new method get_file_with_stat.  | 
492  | 
file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)  | 
493  | 
try:  | 
|
| 
4398.8.5
by John Arbash Meinel
 Fix a few more cases where we were adding a list rather than an empty string.  | 
494  | 
text = file_obj.read()  | 
| 
3709.3.2
by Robert Collins
 Race-free stat-fingerprint updating during commit via a new method get_file_with_stat.  | 
495  | 
finally:  | 
496  | 
file_obj.close()  | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
497  | 
try:  | 
498  | 
ie.text_sha1, ie.text_size = self._add_text_to_weave(  | 
|
| 
4398.8.5
by John Arbash Meinel
 Fix a few more cases where we were adding a list rather than an empty string.  | 
499  | 
ie.file_id, text, heads, nostore_sha)  | 
| 
3709.3.2
by Robert Collins
 Race-free stat-fingerprint updating during commit via a new method get_file_with_stat.  | 
500  | 
                # Let the caller know we generated a stat fingerprint.
 | 
501  | 
fingerprint = (ie.text_sha1, stat_value)  | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
502  | 
except errors.ExistingContent:  | 
| 
2776.4.18
by Robert Collins
 Review feedback.  | 
503  | 
                # Turns out that the file content was unchanged, and we were
 | 
504  | 
                # only going to store a new node if it was changed. Carry over
 | 
|
505  | 
                # the entry.
 | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
506  | 
ie.revision = parent_entry.revision  | 
507  | 
ie.text_size = parent_entry.text_size  | 
|
508  | 
ie.text_sha1 = parent_entry.text_sha1  | 
|
509  | 
ie.executable = parent_entry.executable  | 
|
| 
3709.3.1
by Robert Collins
 First cut - make it work - at updating the tree stat cache during commit.  | 
510  | 
return self._get_delta(ie, basis_inv, path), False, None  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
511  | 
elif kind == 'directory':  | 
512  | 
if not store:  | 
|
513  | 
                # all data is meta here, nothing specific to directory, so
 | 
|
514  | 
                # carry over:
 | 
|
515  | 
ie.revision = parent_entry.revision  | 
|
| 
3709.3.1
by Robert Collins
 First cut - make it work - at updating the tree stat cache during commit.  | 
516  | 
return self._get_delta(ie, basis_inv, path), False, None  | 
| 
4398.8.5
by John Arbash Meinel
 Fix a few more cases where we were adding a list rather than an empty string.  | 
517  | 
self._add_text_to_weave(ie.file_id, '', heads, None)  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
518  | 
elif kind == 'symlink':  | 
519  | 
current_link_target = content_summary[3]  | 
|
520  | 
if not store:  | 
|
| 
2776.4.18
by Robert Collins
 Review feedback.  | 
521  | 
                # symlink target is not generic metadata, check if it has
 | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
522  | 
                # changed.
 | 
523  | 
if current_link_target != parent_entry.symlink_target:  | 
|
524  | 
store = True  | 
|
525  | 
if not store:  | 
|
526  | 
                # unchanged, carry over.
 | 
|
527  | 
ie.revision = parent_entry.revision  | 
|
528  | 
ie.symlink_target = parent_entry.symlink_target  | 
|
| 
3709.3.1
by Robert Collins
 First cut - make it work - at updating the tree stat cache during commit.  | 
529  | 
return self._get_delta(ie, basis_inv, path), False, None  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
530  | 
ie.symlink_target = current_link_target  | 
| 
4398.8.5
by John Arbash Meinel
 Fix a few more cases where we were adding a list rather than an empty string.  | 
531  | 
self._add_text_to_weave(ie.file_id, '', heads, None)  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
532  | 
elif kind == 'tree-reference':  | 
533  | 
if not store:  | 
|
534  | 
if content_summary[3] != parent_entry.reference_revision:  | 
|
535  | 
store = True  | 
|
536  | 
if not store:  | 
|
537  | 
                # unchanged, carry over.
 | 
|
538  | 
ie.reference_revision = parent_entry.reference_revision  | 
|
539  | 
ie.revision = parent_entry.revision  | 
|
| 
3709.3.1
by Robert Collins
 First cut - make it work - at updating the tree stat cache during commit.  | 
540  | 
return self._get_delta(ie, basis_inv, path), False, None  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
541  | 
ie.reference_revision = content_summary[3]  | 
| 
4398.8.5
by John Arbash Meinel
 Fix a few more cases where we were adding a list rather than an empty string.  | 
542  | 
self._add_text_to_weave(ie.file_id, '', heads, None)  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
543  | 
else:  | 
544  | 
raise NotImplementedError('unknown kind')  | 
|
545  | 
ie.revision = self._new_revision_id  | 
|
| 
3775.2.9
by Robert Collins
 CommitBuilder handles deletes via record_iter_entries.  | 
546  | 
self._any_changes = True  | 
| 
3709.3.2
by Robert Collins
 Race-free stat-fingerprint updating during commit via a new method get_file_with_stat.  | 
547  | 
return self._get_delta(ie, basis_inv, path), True, fingerprint  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
548  | 
|
| 
3775.2.30
by Robert Collins
 Remove the basis_tree parameter to record_iter_changes.  | 
549  | 
def record_iter_changes(self, tree, basis_revision_id, iter_changes,  | 
550  | 
_entry_factory=entry_factory):  | 
|
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
551  | 
"""Record a new tree via iter_changes.  | 
552  | 
||
| 
3775.2.9
by Robert Collins
 CommitBuilder handles deletes via record_iter_entries.  | 
553  | 
        :param tree: The tree to obtain text contents from for changed objects.
 | 
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
554  | 
        :param basis_revision_id: The revision id of the tree the iter_changes
 | 
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
555  | 
            has been generated against. Currently assumed to be the same
 | 
556  | 
            as self.parents[0] - if it is not, errors may occur.
 | 
|
557  | 
        :param iter_changes: An iter_changes iterator with the changes to apply
 | 
|
| 
4183.5.5
by Robert Collins
 Enable record_iter_changes for cases where it can work.  | 
558  | 
            to basis_revision_id. The iterator must not include any items with
 | 
559  | 
            a current kind of None - missing items must be either filtered out
 | 
|
560  | 
            or errored-on beefore record_iter_changes sees the item.
 | 
|
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
561  | 
        :param _entry_factory: Private method to bind entry_factory locally for
 | 
562  | 
            performance.
 | 
|
| 
4183.5.4
by Robert Collins
 Turn record_iter_changes into a generator to emit file system hashes.  | 
563  | 
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
 | 
564  | 
            tree._observed_sha1.
 | 
|
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
565  | 
        """
 | 
566  | 
        # Create an inventory delta based on deltas between all the parents and
 | 
|
567  | 
        # deltas between all the parent inventories. We use inventory delta's 
 | 
|
568  | 
        # between the inventory objects because iter_changes masks
 | 
|
569  | 
        # last-changed-field only changes.
 | 
|
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
570  | 
        # Working data:
 | 
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
571  | 
        # file_id -> change map, change is fileid, paths, changed, versioneds,
 | 
572  | 
        # parents, names, kinds, executables
 | 
|
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
573  | 
merged_ids = {}  | 
| 
3775.2.32
by Robert Collins
 Trivial review feedback.  | 
574  | 
        # {file_id -> revision_id -> inventory entry, for entries in parent
 | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
575  | 
        # trees that are not parents[0]
 | 
576  | 
parent_entries = {}  | 
|
| 
4183.5.5
by Robert Collins
 Enable record_iter_changes for cases where it can work.  | 
577  | 
ghost_basis = False  | 
578  | 
try:  | 
|
579  | 
revtrees = list(self.repository.revision_trees(self.parents))  | 
|
580  | 
except errors.NoSuchRevision:  | 
|
581  | 
            # one or more ghosts, slow path.
 | 
|
582  | 
revtrees = []  | 
|
583  | 
for revision_id in self.parents:  | 
|
584  | 
try:  | 
|
585  | 
revtrees.append(self.repository.revision_tree(revision_id))  | 
|
586  | 
except errors.NoSuchRevision:  | 
|
587  | 
if not revtrees:  | 
|
588  | 
basis_revision_id = _mod_revision.NULL_REVISION  | 
|
589  | 
ghost_basis = True  | 
|
590  | 
revtrees.append(self.repository.revision_tree(  | 
|
591  | 
_mod_revision.NULL_REVISION))  | 
|
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
592  | 
        # The basis inventory from a repository 
 | 
593  | 
if revtrees:  | 
|
594  | 
basis_inv = revtrees[0].inventory  | 
|
595  | 
else:  | 
|
596  | 
basis_inv = self.repository.revision_tree(  | 
|
597  | 
_mod_revision.NULL_REVISION).inventory  | 
|
| 
3775.2.32
by Robert Collins
 Trivial review feedback.  | 
598  | 
if len(self.parents) > 0:  | 
| 
4183.5.5
by Robert Collins
 Enable record_iter_changes for cases where it can work.  | 
599  | 
if basis_revision_id != self.parents[0] and not ghost_basis:  | 
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
600  | 
raise Exception(  | 
601  | 
"arbitrary basis parents not yet supported with merges")  | 
|
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
602  | 
for revtree in revtrees[1:]:  | 
| 
3775.2.32
by Robert Collins
 Trivial review feedback.  | 
603  | 
for change in revtree.inventory._make_delta(basis_inv):  | 
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
604  | 
if change[1] is None:  | 
| 
3775.2.32
by Robert Collins
 Trivial review feedback.  | 
605  | 
                        # Not present in this parent.
 | 
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
606  | 
                        continue
 | 
607  | 
if change[2] not in merged_ids:  | 
|
608  | 
if change[0] is not None:  | 
|
| 
4183.5.9
by Robert Collins
 Fix creating new revisions of files when merging.  | 
609  | 
basis_entry = basis_inv[change[2]]  | 
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
610  | 
merged_ids[change[2]] = [  | 
| 
4183.5.9
by Robert Collins
 Fix creating new revisions of files when merging.  | 
611  | 
                                # basis revid
 | 
612  | 
basis_entry.revision,  | 
|
613  | 
                                # new tree revid
 | 
|
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
614  | 
change[3].revision]  | 
| 
4183.5.9
by Robert Collins
 Fix creating new revisions of files when merging.  | 
615  | 
parent_entries[change[2]] = {  | 
616  | 
                                # basis parent
 | 
|
617  | 
basis_entry.revision:basis_entry,  | 
|
618  | 
                                # this parent 
 | 
|
619  | 
change[3].revision:change[3],  | 
|
620  | 
                                }
 | 
|
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
621  | 
else:  | 
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
622  | 
merged_ids[change[2]] = [change[3].revision]  | 
| 
4183.5.9
by Robert Collins
 Fix creating new revisions of files when merging.  | 
623  | 
parent_entries[change[2]] = {change[3].revision:change[3]}  | 
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
624  | 
else:  | 
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
625  | 
merged_ids[change[2]].append(change[3].revision)  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
626  | 
parent_entries[change[2]][change[3].revision] = change[3]  | 
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
627  | 
else:  | 
628  | 
merged_ids = {}  | 
|
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
629  | 
        # Setup the changes from the tree:
 | 
| 
3775.2.32
by Robert Collins
 Trivial review feedback.  | 
630  | 
        # changes maps file_id -> (change, [parent revision_ids])
 | 
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
631  | 
changes= {}  | 
632  | 
for change in iter_changes:  | 
|
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
633  | 
            # This probably looks up in basis_inv way to much.
 | 
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
634  | 
if change[1][0] is not None:  | 
635  | 
head_candidate = [basis_inv[change[0]].revision]  | 
|
636  | 
else:  | 
|
637  | 
head_candidate = []  | 
|
638  | 
changes[change[0]] = change, merged_ids.get(change[0],  | 
|
639  | 
head_candidate)  | 
|
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
640  | 
unchanged_merged = set(merged_ids) - set(changes)  | 
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
641  | 
        # Extend the changes dict with synthetic changes to record merges of
 | 
642  | 
        # texts.
 | 
|
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
643  | 
for file_id in unchanged_merged:  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
644  | 
            # Record a merged version of these items that did not change vs the
 | 
645  | 
            # basis. This can be either identical parallel changes, or a revert
 | 
|
646  | 
            # of a specific file after a merge. The recorded content will be
 | 
|
647  | 
            # that of the current tree (which is the same as the basis), but
 | 
|
648  | 
            # the per-file graph will reflect a merge.
 | 
|
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
649  | 
            # NB:XXX: We are reconstructing path information we had, this
 | 
650  | 
            # should be preserved instead.
 | 
|
651  | 
            # inv delta  change: (file_id, (path_in_source, path_in_target),
 | 
|
652  | 
            #   changed_content, versioned, parent, name, kind,
 | 
|
653  | 
            #   executable)
 | 
|
| 
4183.5.5
by Robert Collins
 Enable record_iter_changes for cases where it can work.  | 
654  | 
try:  | 
655  | 
basis_entry = basis_inv[file_id]  | 
|
656  | 
except errors.NoSuchId:  | 
|
657  | 
                # a change from basis->some_parents but file_id isn't in basis
 | 
|
658  | 
                # so was new in the merge, which means it must have changed
 | 
|
659  | 
                # from basis -> current, and as it hasn't the add was reverted
 | 
|
660  | 
                # by the user. So we discard this change.
 | 
|
661  | 
                pass
 | 
|
662  | 
else:  | 
|
663  | 
change = (file_id,  | 
|
664  | 
(basis_inv.id2path(file_id), tree.id2path(file_id)),  | 
|
665  | 
False, (True, True),  | 
|
666  | 
(basis_entry.parent_id, basis_entry.parent_id),  | 
|
667  | 
(basis_entry.name, basis_entry.name),  | 
|
668  | 
(basis_entry.kind, basis_entry.kind),  | 
|
669  | 
(basis_entry.executable, basis_entry.executable))  | 
|
670  | 
changes[file_id] = (change, merged_ids[file_id])  | 
|
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
671  | 
        # changes contains tuples with the change and a set of inventory
 | 
672  | 
        # candidates for the file.
 | 
|
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
673  | 
        # inv delta is:
 | 
674  | 
        # old_path, new_path, file_id, new_inventory_entry
 | 
|
| 
3775.2.7
by Robert Collins
 CommitBuilder handles no-change commits to roots properly with record_iter_changes.  | 
675  | 
seen_root = False # Is the root in the basis delta?  | 
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
676  | 
inv_delta = self._basis_delta  | 
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
677  | 
modified_rev = self._new_revision_id  | 
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
678  | 
for change, head_candidates in changes.values():  | 
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
679  | 
if change[3][1]: # versioned in target.  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
680  | 
                # Several things may be happening here:
 | 
681  | 
                # We may have a fork in the per-file graph
 | 
|
682  | 
                #  - record a change with the content from tree
 | 
|
683  | 
                # We may have a change against fewer than all trees
 | 
|
684  | 
                #  - carry over the tree that hasn't changed
 | 
|
685  | 
                # We may have a change against all trees
 | 
|
686  | 
                #  - record the change with the content from tree
 | 
|
| 
3775.2.11
by Robert Collins
 CommitBuilder handles renamed directory and unmodified entries with single parents, for record_iter_changes.  | 
687  | 
kind = change[6][1]  | 
| 
3775.2.12
by Robert Collins
 CommitBuilder.record_iter_changes handles renamed files.  | 
688  | 
file_id = change[0]  | 
689  | 
entry = _entry_factory[kind](file_id, change[5][1],  | 
|
690  | 
change[4][1])  | 
|
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
691  | 
head_set = self._heads(change[0], set(head_candidates))  | 
692  | 
heads = []  | 
|
693  | 
                # Preserve ordering.
 | 
|
694  | 
for head_candidate in head_candidates:  | 
|
695  | 
if head_candidate in head_set:  | 
|
696  | 
heads.append(head_candidate)  | 
|
697  | 
head_set.remove(head_candidate)  | 
|
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
698  | 
carried_over = False  | 
| 
3775.2.33
by Robert Collins
 Fix bug with merges of new files, increasing test coverage to ensure its kept fixed.  | 
699  | 
if len(heads) == 1:  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
700  | 
                    # Could be a carry-over situation:
 | 
| 
3775.2.34
by Robert Collins
 Handle committing new files again.  | 
701  | 
parent_entry_revs = parent_entries.get(file_id, None)  | 
702  | 
if parent_entry_revs:  | 
|
703  | 
parent_entry = parent_entry_revs.get(heads[0], None)  | 
|
704  | 
else:  | 
|
705  | 
parent_entry = None  | 
|
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
706  | 
if parent_entry is None:  | 
707  | 
                        # The parent that iter_changes was called against is the one
 | 
|
708  | 
                        # that is the per-file head, so any change is relevant
 | 
|
709  | 
                        # and iter_changes is valid.
 | 
|
710  | 
carry_over_possible = False  | 
|
711  | 
else:  | 
|
712  | 
                        # could be a carry over situation
 | 
|
713  | 
                        # A change against the basis may just indicate a merge;
 | 
|
714  | 
                        # we need to check the content against the source of the
 | 
|
715  | 
                        # merge to determine if it was changed after the merge
 | 
|
716  | 
                        # or carried over.
 | 
|
| 
3775.2.23
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch files.  | 
717  | 
if (parent_entry.kind != entry.kind or  | 
718  | 
parent_entry.parent_id != entry.parent_id or  | 
|
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
719  | 
parent_entry.name != entry.name):  | 
720  | 
                            # Metadata common to all entries has changed
 | 
|
721  | 
                            # against per-file parent
 | 
|
722  | 
carry_over_possible = False  | 
|
723  | 
else:  | 
|
724  | 
carry_over_possible = True  | 
|
725  | 
                        # per-type checks for changes against the parent_entry
 | 
|
726  | 
                        # are done below.
 | 
|
727  | 
else:  | 
|
728  | 
                    # Cannot be a carry-over situation
 | 
|
729  | 
carry_over_possible = False  | 
|
730  | 
                # Populate the entry in the delta
 | 
|
731  | 
if kind == 'file':  | 
|
| 
3775.2.23
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch files.  | 
732  | 
                    # XXX: There is still a small race here: If someone reverts the content of a file
 | 
733  | 
                    # after iter_changes examines and decides it has changed,
 | 
|
734  | 
                    # we will unconditionally record a new version even if some
 | 
|
735  | 
                    # other process reverts it while commit is running (with
 | 
|
736  | 
                    # the revert happening after iter_changes did its
 | 
|
737  | 
                    # examination).
 | 
|
738  | 
if change[7][1]:  | 
|
739  | 
entry.executable = True  | 
|
740  | 
else:  | 
|
741  | 
entry.executable = False  | 
|
| 
4398.8.1
by John Arbash Meinel
 Add a VersionedFile.add_text() api.  | 
742  | 
if (carry_over_possible and  | 
| 
3775.2.23
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch files.  | 
743  | 
parent_entry.executable == entry.executable):  | 
744  | 
                            # Check the file length, content hash after reading
 | 
|
745  | 
                            # the file.
 | 
|
746  | 
nostore_sha = parent_entry.text_sha1  | 
|
747  | 
else:  | 
|
748  | 
nostore_sha = None  | 
|
749  | 
file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])  | 
|
750  | 
try:  | 
|
| 
4398.8.1
by John Arbash Meinel
 Add a VersionedFile.add_text() api.  | 
751  | 
text = file_obj.read()  | 
| 
3775.2.23
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch files.  | 
752  | 
finally:  | 
753  | 
file_obj.close()  | 
|
754  | 
try:  | 
|
755  | 
entry.text_sha1, entry.text_size = self._add_text_to_weave(  | 
|
| 
4398.8.1
by John Arbash Meinel
 Add a VersionedFile.add_text() api.  | 
756  | 
file_id, text, heads, nostore_sha)  | 
| 
4183.5.4
by Robert Collins
 Turn record_iter_changes into a generator to emit file system hashes.  | 
757  | 
yield file_id, change[1][1], (entry.text_sha1, stat_value)  | 
| 
3775.2.23
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch files.  | 
758  | 
except errors.ExistingContent:  | 
759  | 
                        # No content change against a carry_over parent
 | 
|
| 
4183.5.4
by Robert Collins
 Turn record_iter_changes into a generator to emit file system hashes.  | 
760  | 
                        # Perhaps this should also yield a fs hash update?
 | 
| 
3775.2.23
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch files.  | 
761  | 
carried_over = True  | 
762  | 
entry.text_size = parent_entry.text_size  | 
|
763  | 
entry.text_sha1 = parent_entry.text_sha1  | 
|
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
764  | 
elif kind == 'symlink':  | 
| 
3775.2.24
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch symlinks.  | 
765  | 
                    # Wants a path hint?
 | 
766  | 
entry.symlink_target = tree.get_symlink_target(file_id)  | 
|
767  | 
if (carry_over_possible and  | 
|
768  | 
parent_entry.symlink_target == entry.symlink_target):  | 
|
| 
4183.5.2
by Robert Collins
 Support tree-reference in record_iter_changes.  | 
769  | 
carried_over = True  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
770  | 
else:  | 
| 
4398.8.5
by John Arbash Meinel
 Fix a few more cases where we were adding a list rather than an empty string.  | 
771  | 
self._add_text_to_weave(change[0], '', heads, None)  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
772  | 
elif kind == 'directory':  | 
773  | 
if carry_over_possible:  | 
|
774  | 
carried_over = True  | 
|
775  | 
else:  | 
|
| 
3775.2.19
by Robert Collins
 CommitBuilder.record_iter_changes handles merged directories.  | 
776  | 
                        # Nothing to set on the entry.
 | 
777  | 
                        # XXX: split into the Root and nonRoot versions.
 | 
|
778  | 
if change[1][1] != '' or self.repository.supports_rich_root():  | 
|
| 
4398.8.5
by John Arbash Meinel
 Fix a few more cases where we were adding a list rather than an empty string.  | 
779  | 
self._add_text_to_weave(change[0], '', heads, None)  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
780  | 
elif kind == 'tree-reference':  | 
| 
4183.5.2
by Robert Collins
 Support tree-reference in record_iter_changes.  | 
781  | 
if not self.repository._format.supports_tree_reference:  | 
782  | 
                        # This isn't quite sane as an error, but we shouldn't
 | 
|
783  | 
                        # ever see this code path in practice: trees don't
 | 
|
784  | 
                        # permit references when the repo doesn't support tree
 | 
|
785  | 
                        # references.
 | 
|
786  | 
raise errors.UnsupportedOperation(tree.add_reference,  | 
|
787  | 
self.repository)  | 
|
| 
4496.3.1
by Andrew Bennetts
 Fix undefined local and remove unused import in repository.py.  | 
788  | 
reference_revision = tree.get_reference_revision(change[0])  | 
789  | 
entry.reference_revision = reference_revision  | 
|
| 
4183.5.2
by Robert Collins
 Support tree-reference in record_iter_changes.  | 
790  | 
if (carry_over_possible and  | 
791  | 
parent_entry.reference_revision == reference_revision):  | 
|
792  | 
carried_over = True  | 
|
793  | 
else:  | 
|
| 
4398.8.5
by John Arbash Meinel
 Fix a few more cases where we were adding a list rather than an empty string.  | 
794  | 
self._add_text_to_weave(change[0], '', heads, None)  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
795  | 
else:  | 
| 
3775.2.27
by Robert Collins
 CommitBuilder.record_iter_changes handles files becoming directories and links.  | 
796  | 
raise AssertionError('unknown kind %r' % kind)  | 
| 
3775.2.22
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch directories.  | 
797  | 
if not carried_over:  | 
798  | 
entry.revision = modified_rev  | 
|
| 
3775.2.23
by Robert Collins
 CommitBuilder.record_iter_changes handles changed-in-branch files.  | 
799  | 
else:  | 
800  | 
entry.revision = parent_entry.revision  | 
|
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
801  | 
else:  | 
802  | 
entry = None  | 
|
| 
3775.2.7
by Robert Collins
 CommitBuilder handles no-change commits to roots properly with record_iter_changes.  | 
803  | 
new_path = change[1][1]  | 
804  | 
inv_delta.append((change[1][0], new_path, change[0], entry))  | 
|
805  | 
if new_path == '':  | 
|
806  | 
seen_root = True  | 
|
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
807  | 
self.new_inventory = None  | 
| 
3775.2.7
by Robert Collins
 CommitBuilder handles no-change commits to roots properly with record_iter_changes.  | 
808  | 
if len(inv_delta):  | 
| 
3775.2.9
by Robert Collins
 CommitBuilder handles deletes via record_iter_entries.  | 
809  | 
self._any_changes = True  | 
| 
3775.2.7
by Robert Collins
 CommitBuilder handles no-change commits to roots properly with record_iter_changes.  | 
810  | 
if not seen_root:  | 
811  | 
            # housekeeping root entry changes do not affect no-change commits.
 | 
|
| 
3775.2.9
by Robert Collins
 CommitBuilder handles deletes via record_iter_entries.  | 
812  | 
self._require_root_change(tree)  | 
| 
3775.2.29
by Robert Collins
 Updates to the form of add_inventory_by_delta that landed in trunk.  | 
813  | 
self.basis_delta_revision = basis_revision_id  | 
| 
3775.2.4
by Robert Collins
 Start on a CommitBuilder.record_iter_changes method.  | 
814  | 
|
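Taken together, record_iter_changes consumes the iter_changes tuples, accumulates an inventory delta, and yields (file_id, path, (text_sha1, stat_value)) for each file text it stores, so the caller can refresh its hash cache without re-reading files. A minimal driving sketch, assuming already-opened branch/repository/working-tree objects and the record_iter_changes(tree, basis_revision_id, iter_changes) signature; this is illustrative only, not the actual commit code:

    builder = repository.get_commit_builder(branch, parents, config)
    basis_tree = repository.revision_tree(basis_revision_id)
    changes = tree.iter_changes(basis_tree)
    for file_id, path, fs_hash in builder.record_iter_changes(
            tree, basis_revision_id, changes):
        # fs_hash is (text_sha1, stat_value); a dirstate-backed tree could
        # feed this back into its hash cache.
        pass
    # The remaining commit steps (finishing the inventory, builder.commit())
    # are elided in this sketch.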
| 
4398.8.1
by John Arbash Meinel
 Add a VersionedFile.add_text() api.  | 
815  | 
def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):  | 
| 
4398.8.6
by John Arbash Meinel
 Switch the api from VF.add_text to VF._add_text and trim some extra 'features'.  | 
816  | 
parent_keys = tuple([(file_id, parent) for parent in parents])  | 
817  | 
return self.repository.texts._add_text(  | 
|
| 
4398.8.1
by John Arbash Meinel
 Add a VersionedFile.add_text() api.  | 
818  | 
(file_id, self._new_revision_id), parent_keys, new_text,  | 
| 
4398.8.6
by John Arbash Meinel
 Switch the api from VF.add_text to VF._add_text and trim some extra 'features'.  | 
819  | 
nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]  | 
| 
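For illustration, the (file_id, revision_id) key shapes built by _add_text_to_weave, with made-up identifiers (a sketch, not library code):

    file_id, new_rev = 'foo-id', 'rev-2'
    parents = ['rev-1', 'rev-1.1.1']
    parent_keys = tuple((file_id, parent) for parent in parents)
    text_key = (file_id, new_rev)
    # parent_keys == (('foo-id', 'rev-1'), ('foo-id', 'rev-1.1.1'))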
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
820  | 
|
821  | 
||
822  | 
class RootCommitBuilder(CommitBuilder):  | 
|
823  | 
"""This commitbuilder actually records the root id"""  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
824  | 
|
| 
2825.5.2
by Robert Collins
 Review feedback, and fix pointless commits with nested trees to raise PointlessCommit appropriately.  | 
825  | 
    # the root entry gets versioned properly by this builder.
 | 
| 
2840.1.1
by Ian Clatworthy
 faster pointless commit detection (Robert Collins)  | 
826  | 
_versioned_root = True  | 
| 
2825.5.2
by Robert Collins
 Review feedback, and fix pointless commits with nested trees to raise PointlessCommit appropriately.  | 
827  | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
828  | 
def _check_root(self, ie, parent_invs, tree):  | 
829  | 
"""Helper for record_entry_contents.  | 
|
830  | 
||
831  | 
        :param ie: An entry being added.
 | 
|
832  | 
        :param parent_invs: The inventories of the parent revisions of the
 | 
|
833  | 
            commit.
 | 
|
834  | 
        :param tree: The tree that is being committed.
 | 
|
835  | 
        """
 | 
|
836  | 
||
| 
3775.2.9
by Robert Collins
 CommitBuilder handles deletes via record_iter_entries.  | 
837  | 
def _require_root_change(self, tree):  | 
| 
3775.2.7
by Robert Collins
 CommitBuilder handles no-change commits to roots properly with record_iter_changes.  | 
838  | 
"""Enforce an appropriate root object change.  | 
839  | 
||
840  | 
        This is called once when record_iter_changes is called, if and only if
 | 
|
841  | 
        the root was not in the delta calculated by record_iter_changes.
 | 
|
| 
3775.2.9
by Robert Collins
 CommitBuilder handles deletes via record_iter_entries.  | 
842  | 
|
843  | 
        :param tree: The tree which is being committed.
 | 
|
| 
3775.2.7
by Robert Collins
 CommitBuilder handles no-change commits to roots properly with record_iter_changes.  | 
844  | 
        """
 | 
845  | 
        # versioned roots do not change unless the tree found a change.
 | 
|
846  | 
||
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
847  | 
|
| 
2220.2.3
by Martin Pool
 Add tag: revision namespace.  | 
848  | 
######################################################################
 | 
849  | 
# Repositories
 | 
|
850  | 
||
| 
1185.66.5
by Aaron Bentley
 Renamed RevisionStorage to Repository  | 
851  | 
class Repository(object):  | 
| 
1185.70.3
by Martin Pool
 Various updates to make storage branch mergeable:  | 
852  | 
"""Repository holding history for one or more branches.  | 
853  | 
||
854  | 
    The repository holds and retrieves historical information including
 | 
|
855  | 
    revisions and file history.  It's normally accessed only by the Branch,
 | 
|
856  | 
    which views a particular line of development through that history.
 | 
|
857  | 
||
| 
3350.6.7
by Robert Collins
 Review feedback, making things more clear, adding documentation on what is used where.  | 
858  | 
    The Repository builds on top of some byte storage facilities (the revisions,
 | 
| 
3735.2.1
by Robert Collins
 Add the concept of CHK lookups to Repository.  | 
859  | 
    signatures, inventories, texts and chk_bytes attributes) and a Transport,
 | 
860  | 
    which respectively provide byte storage and a means to access the (possibly
 | 
|
| 
1185.70.3
by Martin Pool
 Various updates to make storage branch mergeable:  | 
861  | 
    remote) disk.
 | 
| 
3407.2.13
by Martin Pool
 Remove indirection through control_files to get transports  | 
862  | 
|
| 
3350.6.7
by Robert Collins
 Review feedback, making things more clear, adding documentation on what is used where.  | 
863  | 
    The byte storage facilities are addressed via tuples, which we refer to
 | 
864  | 
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
 | 
|
865  | 
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
 | 
|
| 
3735.2.1
by Robert Collins
 Add the concept of CHK lookups to Repository.  | 
866  | 
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
 | 
| 
3735.2.99
by John Arbash Meinel
 Merge bzr.dev 4034. Whitespace cleanup  | 
867  | 
    byte string made up of a hash identifier and a hash value.
 | 
| 
3735.2.1
by Robert Collins
 Add the concept of CHK lookups to Repository.  | 
868  | 
    We use this interface because it allows low friction with the underlying
 | 
869  | 
    code that implements disk indices, network encoding and other parts of
 | 
|
870  | 
    bzrlib.
 | 
|
| 
3350.6.7
by Robert Collins
 Review feedback, making things more clear, adding documentation on what is used where.  | 
871  | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
872  | 
    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
 | 
873  | 
        the serialised revisions for the repository. This can be used to obtain
 | 
|
874  | 
        revision graph information or to access raw serialised revisions.
 | 
|
875  | 
        The result of trying to insert data into the repository via this store
 | 
|
876  | 
        is undefined: it should be considered read-only except for implementors
 | 
|
877  | 
        of repositories.
 | 
|
| 
3350.6.7
by Robert Collins
 Review feedback, making things more clear, adding documentation on what is used where.  | 
878  | 
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
 | 
879  | 
        the serialised signatures for the repository. This can be used to
 | 
|
880  | 
        obtain access to raw serialised signatures.  The result of trying to
 | 
|
881  | 
        insert data into the repository via this store is undefined: it should
 | 
|
882  | 
        be considered read-only except for implementors of repositories.
 | 
|
883  | 
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
 | 
|
884  | 
        the serialised inventories for the repository. This can be used to
 | 
|
885  | 
        obtain unserialised inventories.  The result of trying to insert data
 | 
|
886  | 
        into the repository via this store is undefined: it should be
 | 
|
887  | 
        considered read-only except for implementors of repositories.
 | 
|
888  | 
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
 | 
|
889  | 
        texts of files and directories for the repository. This can be used to
 | 
|
890  | 
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
 | 
|
891  | 
        is usually a better interface for accessing file texts.
 | 
|
892  | 
        The result of trying to insert data into the repository via this store
 | 
|
893  | 
        is undefined: it should be considered read-only except for implementors
 | 
|
894  | 
        of repositories.
 | 
|
| 
4241.6.8
by Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil
 Add --development6-rich-root, disabling the legacy and unneeded development2 format, and activating the tests for CHK features disabled pending this format. (Robert Collins, John Arbash Meinel, Ian Clatworthy, Vincent Ladeuil)  | 
895  | 
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
 | 
| 
3735.2.1
by Robert Collins
 Add the concept of CHK lookups to Repository.  | 
896  | 
        any data the repository chooses to store or have indexed by its hash.
 | 
897  | 
        The result of trying to insert data into the repository via this store
 | 
|
898  | 
        is undefined: it should be considered read-only except for implementors
 | 
|
899  | 
        of repositories.
 | 
|
| 
3407.2.13
by Martin Pool
 Remove indirection through control_files to get transports  | 
900  | 
    :ivar _transport: Transport for file access to repository, typically
 | 
901  | 
        pointing to .bzr/repository.
 | 
|
| 
1185.70.3
by Martin Pool
 Various updates to make storage branch mergeable:  | 
902  | 
    """
 | 
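A short illustration of the key conventions described above, with made-up ids and an assumed read-locked repository `repo` (a sketch, not part of the class):

    rev_key = ('rev-id-1',)               # revisions, inventories, signatures
    text_key = ('file-id-1', 'rev-id-1')  # texts
    parent_map = repo.revisions.get_parent_map([rev_key])
    stream = repo.texts.get_record_stream([text_key], 'unordered', True)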
| 
1185.65.17
by Robert Collins
 Merge from integration, mode-changes are broken.  | 
903  | 
|
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
904  | 
    # What class to use for a CommitBuilder. Often it's simpler to change this
 | 
905  | 
    # in a Repository class subclass rather than to override
 | 
|
906  | 
    # get_commit_builder.
 | 
|
907  | 
_commit_builder_class = CommitBuilder  | 
|
908  | 
    # The search regex used by xml based repositories to determine what things
 | 
|
909  | 
    # were changed in a single commit.
 | 
|
| 
2163.2.1
by John Arbash Meinel
 Speed up the fileids_altered_by_revision_ids processing  | 
910  | 
_file_ids_altered_regex = lazy_regex.lazy_compile(  | 
911  | 
r'file_id="(?P<file_id>[^"]+)"'  | 
|
| 
2776.4.6
by Robert Collins
 Fixup various commit test failures falling out from the other commit changes.  | 
912  | 
r'.* revision="(?P<revision_id>[^"]+)"'  | 
| 
2163.2.1
by John Arbash Meinel
 Speed up the fileids_altered_by_revision_ids processing  | 
913  | 
        )
 | 
914  | 
||
| 
3825.4.1
by Andrew Bennetts
 Add suppress_errors to abort_write_group.  | 
915  | 
def abort_write_group(self, suppress_errors=False):  | 
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
916  | 
"""Commit the contents accrued within the current write group.  | 
917  | 
||
| 
3825.4.6
by Andrew Bennetts
 Document the suppress_errors flag in the docstring.  | 
918  | 
        :param suppress_errors: if true, abort_write_group will catch and log
 | 
919  | 
            unexpected errors that happen during the abort, rather than
 | 
|
920  | 
            allowing them to propagate.  Defaults to False.
 | 
|
921  | 
||
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
922  | 
        :seealso: start_write_group.
 | 
923  | 
        """
 | 
|
924  | 
if self._write_group is not self.get_transaction():  | 
|
925  | 
            # has an unlock or relock occurred?
 | 
|
| 
3735.2.9
by Robert Collins
 Get a working chk_map using inventory implementation bootstrapped.  | 
926  | 
raise errors.BzrError(  | 
927  | 
'mismatched lock context and write group. %r, %r' %  | 
|
928  | 
(self._write_group, self.get_transaction()))  | 
|
| 
3825.4.1
by Andrew Bennetts
 Add suppress_errors to abort_write_group.  | 
929  | 
try:  | 
930  | 
self._abort_write_group()  | 
|
931  | 
except Exception, exc:  | 
|
932  | 
self._write_group = None  | 
|
933  | 
if not suppress_errors:  | 
|
934  | 
                raise
 | 
|
935  | 
mutter('abort_write_group failed')  | 
|
936  | 
log_exception_quietly()  | 
|
937  | 
note('bzr: ERROR (ignored): %s', exc)  | 
|
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
938  | 
self._write_group = None  | 
939  | 
||
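A minimal sketch of the write-group protocol around abort_write_group, assuming an already-opened repository `repo` (the usual calling pattern, not library code):

    repo.lock_write()
    try:
        repo.start_write_group()
        try:
            pass  # insert revisions, inventories, texts ...
        except:
            # Abort rather than commit; suppress_errors keeps a failing
            # abort from masking the original exception.
            repo.abort_write_group(suppress_errors=True)
            raise
        else:
            repo.commit_write_group()
    finally:
        repo.unlock()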
940  | 
def _abort_write_group(self):  | 
|
941  | 
"""Template method for per-repository write group cleanup.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
942  | 
|
943  | 
        This is called during abort before the write group is considered to be
 | 
|
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
944  | 
        finished and should clean up any internal state accrued during the write
 | 
945  | 
        group. There is no requirement that data handed to the repository be
 | 
|
946  | 
        *not* made available - this is not a rollback - but neither should any
 | 
|
947  | 
        attempt be made to ensure that data added is fully committed. Abort is
 | 
|
948  | 
        invoked when an error has occurred, so further disk or network operations
 | 
|
949  | 
        may not be possible or may error, and if possible should not be
 | 
|
950  | 
        attempted.
 | 
|
951  | 
        """
 | 
|
952  | 
||
| 
3221.12.1
by Robert Collins
 Backport development1 format (stackable packs) to before-shallow-branches.  | 
953  | 
def add_fallback_repository(self, repository):  | 
954  | 
"""Add a repository to use for looking up data not held locally.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
955  | 
|
| 
3221.12.1
by Robert Collins
 Backport development1 format (stackable packs) to before-shallow-branches.  | 
956  | 
        :param repository: A repository.
 | 
957  | 
        """
 | 
|
958  | 
if not self._format.supports_external_lookups:  | 
|
959  | 
raise errors.UnstackableRepositoryFormat(self._format, self.base)  | 
|
| 
4379.2.2
by John Arbash Meinel
 Change the Repository.add_fallback_repository() contract slightly.  | 
960  | 
if self.is_locked():  | 
961  | 
            # This repository will call fallback.unlock() when we transition to
 | 
|
962  | 
            # the unlocked state, so we make sure to increment the lock count
 | 
|
963  | 
repository.lock_read()  | 
|
| 
3582.1.7
by Martin Pool
 add_fallback_repository gives more detail on incompatibilities  | 
964  | 
self._check_fallback_repository(repository)  | 
| 
3221.12.1
by Robert Collins
 Backport development1 format (stackable packs) to before-shallow-branches.  | 
965  | 
self._fallback_repositories.append(repository)  | 
| 
3221.12.13
by Robert Collins
 Implement generic stacking rather than pack-internals based stacking.  | 
966  | 
self.texts.add_fallback_versioned_files(repository.texts)  | 
967  | 
self.inventories.add_fallback_versioned_files(repository.inventories)  | 
|
968  | 
self.revisions.add_fallback_versioned_files(repository.revisions)  | 
|
969  | 
self.signatures.add_fallback_versioned_files(repository.signatures)  | 
|
| 
3735.2.9
by Robert Collins
 Get a working chk_map using inventory implementation bootstrapped.  | 
970  | 
if self.chk_bytes is not None:  | 
971  | 
self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)  | 
|
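A hedged sketch of wiring up a stacked repository with this method; the URLs and the bzrdir-based opening are illustrative assumptions:

    from bzrlib import bzrdir
    stacked = bzrdir.BzrDir.open(stacked_url).open_repository()
    base = bzrdir.BzrDir.open(base_url).open_repository()
    stacked.lock_read()   # the fallback is lock_read()d on this transition
    try:
        stacked.add_fallback_repository(base)
        # texts/inventories/revisions/signatures (and chk_bytes when present)
        # now consult base for keys that are missing locally.
    finally:
        stacked.unlock()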
| 
3221.12.1
by Robert Collins
 Backport development1 format (stackable packs) to before-shallow-branches.  | 
972  | 
|
| 
3582.1.7
by Martin Pool
 add_fallback_repository gives more detail on incompatibilities  | 
973  | 
def _check_fallback_repository(self, repository):  | 
| 
3221.12.4
by Robert Collins
 Implement basic repository supporting external references.  | 
974  | 
"""Check that this repository can fallback to repository safely.  | 
| 
3582.1.7
by Martin Pool
 add_fallback_repository gives more detail on incompatibilities  | 
975  | 
|
976  | 
        Raise an error if not.
 | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
977  | 
|
| 
3221.12.4
by Robert Collins
 Implement basic repository supporting external references.  | 
978  | 
        :param repository: A repository to fallback to.
 | 
979  | 
        """
 | 
|
| 
3582.1.7
by Martin Pool
 add_fallback_repository gives more detail on incompatibilities  | 
980  | 
return InterRepository._assert_same_model(self, repository)  | 
| 
3221.12.4
by Robert Collins
 Implement basic repository supporting external references.  | 
981  | 
|
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
982  | 
def add_inventory(self, revision_id, inv, parents):  | 
983  | 
"""Add the inventory inv to the repository as revision_id.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
984  | 
|
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
985  | 
        :param parents: The revision ids of the parents that revision_id
 | 
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
986  | 
                        is known to have and are in the repository already.
 | 
987  | 
||
| 
3169.2.1
by Robert Collins
 New method ``iter_inventories`` on Repository for access to many  | 
988  | 
        :returns: The validator (which is a sha1 digest, though what is sha'd is
 | 
989  | 
            repository format specific) of the serialized inventory.
 | 
|
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
990  | 
        """
 | 
| 
3376.2.4
by Martin Pool
 Remove every assert statement from bzrlib!  | 
991  | 
if not self.is_in_write_group():  | 
992  | 
raise AssertionError("%r not in write group" % (self,))  | 
|
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
993  | 
_mod_revision.check_not_reserved_id(revision_id)  | 
| 
3376.2.4
by Martin Pool
 Remove every assert statement from bzrlib!  | 
994  | 
if not (inv.revision_id is None or inv.revision_id == revision_id):  | 
995  | 
raise AssertionError(  | 
|
996  | 
                "Mismatch between inventory revision"
 | 
|
997  | 
" id and insertion revid (%r, %r)"  | 
|
998  | 
% (inv.revision_id, revision_id))  | 
|
999  | 
if inv.root is None:  | 
|
1000  | 
raise AssertionError()  | 
|
| 
3735.2.9
by Robert Collins
 Get a working chk_map using inventory implementation bootstrapped.  | 
1001  | 
return self._add_inventory_checked(revision_id, inv, parents)  | 
1002  | 
||
1003  | 
def _add_inventory_checked(self, revision_id, inv, parents):  | 
|
1004  | 
"""Add inv to the repository after checking the inputs.  | 
|
1005  | 
||
1006  | 
        This function can be overridden to allow different inventory styles.
 | 
|
1007  | 
||
1008  | 
        :seealso: add_inventory, for the contract.
 | 
|
1009  | 
        """
 | 
|
| 
2817.2.1
by Robert Collins
 * Inventory serialisation no longer double-sha's the content.  | 
1010  | 
inv_lines = self._serialise_inventory_to_lines(inv)  | 
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1011  | 
return self._inventory_add_lines(revision_id, parents,  | 
| 
2817.2.1
by Robert Collins
 * Inventory serialisation no longer double-sha's the content.  | 
1012  | 
inv_lines, check_content=False)  | 
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
1013  | 
|
| 
3879.2.2
by John Arbash Meinel
 Rename add_inventory_delta to add_inventory_by_delta.  | 
1014  | 
def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,  | 
| 
3735.2.121
by Ian Clatworthy
 add propagate_caches param to create_by_apply_delta, making fast-import 30% faster  | 
1015  | 
parents, basis_inv=None, propagate_caches=False):  | 
| 
3775.2.1
by Robert Collins
 Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas.  | 
1016  | 
"""Add a new inventory expressed as a delta against another revision.  | 
| 
3879.2.2
by John Arbash Meinel
 Rename add_inventory_delta to add_inventory_by_delta.  | 
1017  | 
|
| 
4501.1.1
by Robert Collins
 Add documentation describing how and why we use inventory deltas, and what can go wrong with them.  | 
1018  | 
        See the inventory developers documentation for the theory behind
 | 
1019  | 
        inventory deltas.
 | 
|
1020  | 
||
| 
3775.2.1
by Robert Collins
 Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas.  | 
1021  | 
        :param basis_revision_id: The inventory id the delta was created
 | 
| 
3879.2.2
by John Arbash Meinel
 Rename add_inventory_delta to add_inventory_by_delta.  | 
1022  | 
            against. (This does not have to be a direct parent.)
 | 
| 
3775.2.1
by Robert Collins
 Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas.  | 
1023  | 
        :param delta: The inventory delta (see Inventory.apply_delta for
 | 
1024  | 
            details).
 | 
|
1025  | 
        :param new_revision_id: The revision id that the inventory is being
 | 
|
1026  | 
            added for.
 | 
|
1027  | 
        :param parents: The revision ids of the parents that revision_id is
 | 
|
1028  | 
            known to have and are in the repository already. These are supplied
 | 
|
1029  | 
            for repositories that depend on the inventory graph for revision
 | 
|
1030  | 
            graph access, as well as for those that pun ancestry with delta
 | 
|
1031  | 
            compression.
 | 
|
| 
3735.2.120
by Ian Clatworthy
 allow a known basis inventory to be passed to Repository.add_inventory_by_delta()  | 
1032  | 
        :param basis_inv: The basis inventory if it is already known,
 | 
1033  | 
            otherwise None.
 | 
|
| 
3735.2.121
by Ian Clatworthy
 add propagate_caches param to create_by_apply_delta, making fast-import 30% faster  | 
1034  | 
        :param propagate_caches: If True, the caches for this inventory are
 | 
1035  | 
          copied to and updated for the result if possible.
 | 
|
| 
3775.2.1
by Robert Collins
 Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas.  | 
1036  | 
|
| 
3879.3.1
by John Arbash Meinel
 Change the return of add_inventory_by_delta to also return the Inventory.  | 
1037  | 
        :returns: (validator, new_inv)
 | 
1038  | 
            The validator (which is a sha1 digest, though what is sha'd is
 | 
|
1039  | 
            repository format specific) of the serialized inventory, and the
 | 
|
1040  | 
            resulting inventory.
 | 
|
| 
3775.2.1
by Robert Collins
 Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas.  | 
1041  | 
        """
 | 
1042  | 
if not self.is_in_write_group():  | 
|
1043  | 
raise AssertionError("%r not in write group" % (self,))  | 
|
1044  | 
_mod_revision.check_not_reserved_id(new_revision_id)  | 
|
1045  | 
basis_tree = self.revision_tree(basis_revision_id)  | 
|
1046  | 
basis_tree.lock_read()  | 
|
1047  | 
try:  | 
|
1048  | 
            # Note that this mutates the inventory of basis_tree, which not all
 | 
|
1049  | 
            # inventory implementations may support: A better idiom would be to
 | 
|
1050  | 
            # return a new inventory, but as there is no revision tree cache in
 | 
|
1051  | 
            # repository this is safe for now - RBC 20081013
 | 
|
| 
3735.2.120
by Ian Clatworthy
 allow a known basis inventory to be passed to Repository.add_inventory_by_delta()  | 
1052  | 
if basis_inv is None:  | 
1053  | 
basis_inv = basis_tree.inventory  | 
|
| 
3775.2.1
by Robert Collins
 Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas.  | 
1054  | 
basis_inv.apply_delta(delta)  | 
1055  | 
basis_inv.revision_id = new_revision_id  | 
|
| 
3879.3.1
by John Arbash Meinel
 Change the return of add_inventory_by_delta to also return the Inventory.  | 
1056  | 
return (self.add_inventory(new_revision_id, basis_inv, parents),  | 
| 
3735.2.59
by Jelmer Vernooij
 Make Repository.add_inventory_delta() return the resulting inventory.  | 
1057  | 
basis_inv)  | 
| 
3775.2.1
by Robert Collins
 Create bzrlib.repository.Repository.add_inventory_delta for adding inventories via deltas.  | 
1058  | 
finally:  | 
1059  | 
basis_tree.unlock()  | 
|
1060  | 
||
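A hedged example of the delta shape this method consumes, modifying a single file against its basis; all identifiers are made up and the locking/write-group ceremony is elided:

    from bzrlib import inventory
    new_entry = inventory.InventoryFile('foo-id', 'foo.txt', root_id)
    new_entry.revision = new_revision_id
    new_entry.text_sha1, new_entry.text_size = new_sha1, new_size
    # delta items are (old_path, new_path, file_id, new_inventory_entry)
    delta = [('foo.txt', 'foo.txt', 'foo-id', new_entry)]
    validator, new_inv = repo.add_inventory_by_delta(
        basis_revision_id, delta, new_revision_id, [basis_revision_id])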
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1061  | 
def _inventory_add_lines(self, revision_id, parents, lines,  | 
| 
2805.6.7
by Robert Collins
 Review feedback.  | 
1062  | 
check_content=True):  | 
| 
2817.2.1
by Robert Collins
 * Inventory serialisation no longer double-sha's the content.  | 
1063  | 
"""Store lines in inv_vf and return the sha1 of the inventory."""  | 
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1064  | 
parents = [(parent,) for parent in parents]  | 
1065  | 
return self.inventories.add_lines((revision_id,), parents, lines,  | 
|
| 
2817.2.1
by Robert Collins
 * Inventory serialisation no longer double-sha's the content.  | 
1066  | 
check_content=check_content)[0]  | 
| 
1740.3.6
by Jelmer Vernooij
 Move inventory writing to the commit builder.  | 
1067  | 
|
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
1068  | 
def add_revision(self, revision_id, rev, inv=None, config=None):  | 
1069  | 
"""Add rev to the revision store as revision_id.  | 
|
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
1070  | 
|
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
1071  | 
        :param revision_id: the revision id to use.
 | 
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
1072  | 
        :param rev: The revision object.
 | 
1073  | 
        :param inv: The inventory for the revision. If None, it will be looked
 | 
|
1074  | 
                    up in the inventory store.
 | 
|
1075  | 
        :param config: If None no digital signature will be created.
 | 
|
1076  | 
                       If supplied its signature_needed method will be used
 | 
|
1077  | 
                       to determine if a signature should be made.
 | 
|
1078  | 
        """
 | 
|
| 
2249.5.13
by John Arbash Meinel
 Finish auditing Repository, and fix generate_ids to always generate utf8 ids.  | 
1079  | 
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
 | 
1080  | 
        #       rev.parent_ids?
 | 
|
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
1081  | 
_mod_revision.check_not_reserved_id(revision_id)  | 
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
1082  | 
if config is not None and config.signature_needed():  | 
1083  | 
if inv is None:  | 
|
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
1084  | 
inv = self.get_inventory(revision_id)  | 
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
1085  | 
plaintext = Testament(rev, inv).as_short_text()  | 
1086  | 
self.store_revision_signature(  | 
|
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
1087  | 
gpg.GPGStrategy(config), plaintext, revision_id)  | 
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1088  | 
        # check inventory present
 | 
1089  | 
if not self.inventories.get_parent_map([(revision_id,)]):  | 
|
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
1090  | 
if inv is None:  | 
| 
2249.5.12
by John Arbash Meinel
 Change the APIs for VersionedFile, Store, and some of Repository into utf-8  | 
1091  | 
raise errors.WeaveRevisionNotPresent(revision_id,  | 
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1092  | 
self.inventories)  | 
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
1093  | 
else:  | 
1094  | 
                # yes, this is not suitable for adding with ghosts.
 | 
|
| 
3380.1.6
by Aaron Bentley
 Ensure fetching munges sha1s  | 
1095  | 
rev.inventory_sha1 = self.add_inventory(revision_id, inv,  | 
| 
3305.1.1
by Jelmer Vernooij
 Make sure that specifying the inv= argument to add_revision() sets the  | 
1096  | 
rev.parent_ids)  | 
| 
3380.1.6
by Aaron Bentley
 Ensure fetching munges sha1s  | 
1097  | 
else:  | 
| 
3350.8.3
by Robert Collins
 VF.get_sha1s needed changing to be stackable.  | 
1098  | 
key = (revision_id,)  | 
1099  | 
rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]  | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1100  | 
self._add_revision(rev)  | 
| 
1570.1.2
by Robert Collins
 Import bzrtools' 'fix' command as 'bzr reconcile.'  | 
1101  | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1102  | 
def _add_revision(self, revision):  | 
1103  | 
text = self._serializer.write_revision_to_string(revision)  | 
|
1104  | 
key = (revision.revision_id,)  | 
|
1105  | 
parents = tuple((parent,) for parent in revision.parent_ids)  | 
|
1106  | 
self.revisions.add_lines(key, parents, osutils.split_lines(text))  | 
|
| 
2520.4.10
by Aaron Bentley
 Enable installation of revisions  | 
1107  | 
|
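As the "check inventory present" branch above implies, the inventory for a revision must already be stored (or passed via inv=) before add_revision runs. A brief ordering sketch inside an open write group, with assumed objects rather than library code:

    repo.add_inventory(rev.revision_id, inv, rev.parent_ids)
    repo.add_revision(rev.revision_id, rev, inv=inv, config=None)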
| 
1732.2.4
by Martin Pool
 Split check into Branch.check and Repository.check  | 
1108  | 
def all_revision_ids(self):  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1109  | 
"""Returns a list of all the revision ids in the repository.  | 
| 
1732.2.4
by Martin Pool
 Split check into Branch.check and Repository.check  | 
1110  | 
|
| 
3221.12.1
by Robert Collins
 Backport development1 format (stackable packs) to before-shallow-branches.  | 
1111  | 
        This is conceptually deprecated because code should generally work on
 | 
1112  | 
        the graph reachable from a particular revision, and ignore any other
 | 
|
1113  | 
        revisions that might be present.  There is no direct replacement
 | 
|
1114  | 
        method.
 | 
|
| 
1732.2.4
by Martin Pool
 Split check into Branch.check and Repository.check  | 
1115  | 
        """
 | 
| 
2592.3.114
by Robert Collins
 More evil mutterings.  | 
1116  | 
if 'evil' in debug.debug_flags:  | 
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
1117  | 
mutter_callsite(2, "all_revision_ids is linear with history.")  | 
| 
3221.12.4
by Robert Collins
 Implement basic repository supporting external references.  | 
1118  | 
return self._all_revision_ids()  | 
| 
1732.2.4
by Martin Pool
 Split check into Branch.check and Repository.check  | 
1119  | 
|
1120  | 
def _all_revision_ids(self):  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1121  | 
"""Returns a list of all the revision ids in the repository.  | 
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1122  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1123  | 
        These are in as much topological order as the underlying store can
 | 
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
1124  | 
        present.
 | 
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1125  | 
        """
 | 
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
1126  | 
raise NotImplementedError(self._all_revision_ids)  | 
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1127  | 
|
| 
1687.1.7
by Robert Collins
 Teach Repository about break_lock.  | 
1128  | 
def break_lock(self):  | 
1129  | 
"""Break a lock if one is present from another instance.  | 
|
1130  | 
||
1131  | 
        Uses the ui factory to ask for confirmation if the lock may be from
 | 
|
1132  | 
        an active process.
 | 
|
1133  | 
        """
 | 
|
1134  | 
self.control_files.break_lock()  | 
|
1135  | 
||
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1136  | 
    @needs_read_lock
 | 
1137  | 
def _eliminate_revisions_not_present(self, revision_ids):  | 
|
1138  | 
"""Check every revision id in revision_ids to see if we have it.  | 
|
1139  | 
||
1140  | 
        Returns a set of the present revisions.
 | 
|
1141  | 
        """
 | 
|
| 
1534.4.41
by Robert Collins
 Branch now uses BzrDir reasonably sanely.  | 
1142  | 
result = []  | 
| 
3369.2.1
by John Arbash Meinel
 Knit => knit fetching also has some very bad 'for x in revision_ids: has_revision_id()' calls  | 
1143  | 
graph = self.get_graph()  | 
1144  | 
parent_map = graph.get_parent_map(revision_ids)  | 
|
1145  | 
        # The old API returned a list, should this actually be a set?
 | 
|
1146  | 
return parent_map.keys()  | 
|
| 
1534.4.41
by Robert Collins
 Branch now uses BzrDir reasonably sanely.  | 
1147  | 
|
| 
1534.4.40
by Robert Collins
 Add RepositoryFormats and allow bzrdir.open or create _repository to be used.  | 
1148  | 
    @staticmethod
 | 
1149  | 
def create(a_bzrdir):  | 
|
1150  | 
"""Construct the current default format repository in a_bzrdir."""  | 
|
1151  | 
return RepositoryFormat.get_default_format().initialize(a_bzrdir)  | 
|
1152  | 
||
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1153  | 
def __init__(self, _format, a_bzrdir, control_files):  | 
| 
1556.1.3
by Robert Collins
 Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids  | 
1154  | 
"""instantiate a Repository.  | 
1155  | 
||
1156  | 
        :param _format: The format of the repository on disk.
 | 
|
1157  | 
        :param a_bzrdir: The BzrDir of the repository.
 | 
|
1158  | 
||
1159  | 
        In the future we will have a single api for all stores for
 | 
|
1160  | 
        getting file texts, inventories and revisions, then
 | 
|
1161  | 
        this construct will accept instances of those things.
 | 
|
1162  | 
        """
 | 
|
| 
1608.2.1
by Martin Pool
 [merge] Storage filename escaping  | 
1163  | 
super(Repository, self).__init__()  | 
| 
1534.4.40
by Robert Collins
 Add RepositoryFormats and allow bzrdir.open or create _repository to be used.  | 
1164  | 
self._format = _format  | 
| 
1556.1.3
by Robert Collins
 Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids  | 
1165  | 
        # the following are part of the public API for Repository:
 | 
| 
1534.4.40
by Robert Collins
 Add RepositoryFormats and allow bzrdir.open or create _repository to be used.  | 
1166  | 
self.bzrdir = a_bzrdir  | 
| 
1556.1.3
by Robert Collins
 Rearrangment of Repository logic to be less type code driven, and bugfix InterRepository.missing_revision_ids  | 
1167  | 
self.control_files = control_files  | 
| 
3407.2.13
by Martin Pool
 Remove indirection through control_files to get transports  | 
1168  | 
self._transport = control_files._transport  | 
| 
3407.2.14
by Martin Pool
 Remove more cases of getting transport via control_files  | 
1169  | 
self.base = self._transport.base  | 
| 
2671.4.2
by Robert Collins
 Review feedback.  | 
1170  | 
        # for tests
 | 
1171  | 
self._reconcile_does_inventory_gc = True  | 
|
| 
2745.6.16
by Aaron Bentley
 Update from review  | 
1172  | 
self._reconcile_fixes_text_parents = False  | 
| 
2951.1.3
by Robert Collins
 Partial support for native reconcile with packs.  | 
1173  | 
self._reconcile_backsup_inventory = True  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1174  | 
        # not right yet - should be more semantically clear ?
 | 
1175  | 
        #
 | 
|
| 
1608.2.1
by Martin Pool
 [merge] Storage filename escaping  | 
1176  | 
        # TODO: make sure to construct the right store classes, etc, depending
 | 
1177  | 
        # on whether escaping is required.
 | 
|
| 
1904.2.3
by Martin Pool
 Give a warning on access to old repository formats  | 
1178  | 
self._warn_if_deprecated()  | 
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1179  | 
self._write_group = None  | 
| 
3221.12.1
by Robert Collins
 Backport development1 format (stackable packs) to before-shallow-branches.  | 
1180  | 
        # Additional places to query for data.
 | 
1181  | 
self._fallback_repositories = []  | 
|
| 
3882.6.23
by John Arbash Meinel
 Change the XMLSerializer.read_inventory_from_string api.  | 
1182  | 
        # An InventoryEntry cache, used during deserialization
 | 
1183  | 
self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)  | 
|
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1184  | 
|
| 
1668.1.3
by Martin Pool
 [patch] use the correct transaction when committing snapshot (Malone: #43959)  | 
1185  | 
def __repr__(self):  | 
| 
2592.4.5
by Martin Pool
 Add Repository.base on all repositories.  | 
1186  | 
return '%s(%r)' % (self.__class__.__name__,  | 
1187  | 
self.base)  | 
|
| 
1668.1.3
by Martin Pool
 [patch] use the correct transaction when committing snapshot (Malone: #43959)  | 
1188  | 
|
| 
2671.1.4
by Andrew Bennetts
 Rename is_same_repository to has_same_location, thanks Aaron!  | 
1189  | 
def has_same_location(self, other):  | 
| 
2671.1.3
by Andrew Bennetts
 Remove Repository.__eq__/__ne__ methods, replace with is_same_repository method.  | 
1190  | 
"""Returns a boolean indicating if this repository is at the same  | 
1191  | 
        location as another repository.
 | 
|
1192  | 
||
1193  | 
        This might return False even when two repository objects are accessing
 | 
|
1194  | 
        the same physical repository via different URLs.
 | 
|
1195  | 
        """
 | 
|
| 
2592.3.162
by Robert Collins
 Remove some arbitrary differences from bzr.dev.  | 
1196  | 
if self.__class__ is not other.__class__:  | 
1197  | 
return False  | 
|
| 
3407.2.3
by Martin Pool
 Branch and Repository use their own ._transport rather than going through .control_files  | 
1198  | 
return (self._transport.base == other._transport.base)  | 
| 
2671.1.1
by Andrew Bennetts
 Add support for comparing Repositories with == and != operators.  | 
1199  | 
|
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1200  | 
def is_in_write_group(self):  | 
1201  | 
"""Return True if there is an open write group.  | 
|
1202  | 
||
1203  | 
        :seealso: start_write_group.
 | 
|
1204  | 
        """
 | 
|
1205  | 
return self._write_group is not None  | 
|
1206  | 
||
| 
1694.2.6
by Martin Pool
 [merge] bzr.dev  | 
1207  | 
def is_locked(self):  | 
1208  | 
return self.control_files.is_locked()  | 
|
1209  | 
||
| 
2592.3.188
by Robert Collins
 Allow pack repositories to have multiple writers active at one time, for greater concurrency.  | 
1210  | 
def is_write_locked(self):  | 
1211  | 
"""Return True if this object is write locked."""  | 
|
1212  | 
return self.is_locked() and self.control_files._lock_mode == 'w'  | 
|
1213  | 
||
| 
2018.5.75
by Andrew Bennetts
 Add Repository.{dont_,}leave_lock_in_place.  | 
1214  | 
def lock_write(self, token=None):  | 
1215  | 
"""Lock this repository for writing.  | 
|
| 
2617.6.8
by Robert Collins
 Review feedback and documentation.  | 
1216  | 
|
1217  | 
        This causes caching within the repository object to start accumulating
 | 
|
1218  | 
        data during reads, and allows a 'write_group' to be obtained. Write
 | 
|
1219  | 
        groups must be used for actual data insertion.
 | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1220  | 
|
| 
2018.5.75
by Andrew Bennetts
 Add Repository.{dont_,}leave_lock_in_place.  | 
1221  | 
        :param token: if this is already locked, then lock_write will fail
 | 
1222  | 
            unless the token matches the existing lock.
 | 
|
1223  | 
        :returns: a token if this instance supports tokens, otherwise None.
 | 
|
1224  | 
        :raises TokenLockingNotSupported: when a token is given but this
 | 
|
1225  | 
            instance doesn't support using token locks.
 | 
|
1226  | 
        :raises MismatchedToken: if the specified token doesn't match the token
 | 
|
1227  | 
            of the existing lock.
 | 
|
| 
2617.6.8
by Robert Collins
 Review feedback and documentation.  | 
1228  | 
        :seealso: start_write_group.
 | 
| 
2018.5.75
by Andrew Bennetts
 Add Repository.{dont_,}leave_lock_in_place.  | 
1229  | 
|
| 
2018.5.145
by Andrew Bennetts
 Add a brief explanation of what tokens are used for to lock_write docstrings.  | 
1230  | 
        A token should be passed in if you know that you have locked the object
 | 
1231  | 
        some other way, and need to synchronise this object's state with that
 | 
|
1232  | 
        fact.
 | 
|
1233  | 
||
| 
2018.5.75
by Andrew Bennetts
 Add Repository.{dont_,}leave_lock_in_place.  | 
1234  | 
        XXX: this docstring is duplicated in many places, e.g. lockable_files.py
 | 
1235  | 
        """
 | 
|
| 
4145.1.2
by Robert Collins
 Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances.  | 
1236  | 
locked = self.is_locked()  | 
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1237  | 
result = self.control_files.lock_write(token=token)  | 
| 
4145.1.2
by Robert Collins
 Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances.  | 
1238  | 
if not locked:  | 
| 
4379.2.1
by John Arbash Meinel
 Change the fallback repository code to only lock/unlock on transition.  | 
1239  | 
for repo in self._fallback_repositories:  | 
1240  | 
                # Writes don't affect fallback repos
 | 
|
1241  | 
repo.lock_read()  | 
|
| 
4145.1.2
by Robert Collins
 Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances.  | 
1242  | 
self._refresh_data()  | 
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1243  | 
return result  | 
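A minimal usage sketch of the locking API described in this docstring (not part of the annotated source); `repo` is assumed to be an already-opened Repository instance.

token = repo.lock_write()        # None for formats without token support
try:
    if token is not None:
        # re-locking an already write-locked repository must present the token
        repo.lock_write(token=token)
        repo.unlock()
    # reads may happen here; actual data insertion also needs a write group
finally:
    repo.unlock()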
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1244  | 
|
1245  | 
def lock_read(self):  | 
|
| 
4145.1.2
by Robert Collins
 Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances.  | 
1246  | 
locked = self.is_locked()  | 
| 
1553.5.55
by Martin Pool
 [revert] broken changes  | 
1247  | 
self.control_files.lock_read()  | 
| 
4145.1.2
by Robert Collins
 Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances.  | 
1248  | 
if not locked:  | 
| 
4379.2.1
by John Arbash Meinel
 Change the fallback repository code to only lock/unlock on transition.  | 
1249  | 
for repo in self._fallback_repositories:  | 
1250  | 
repo.lock_read()  | 
|
| 
4145.1.2
by Robert Collins
 Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances.  | 
1251  | 
self._refresh_data()  | 
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1252  | 
|
| 
1694.2.6
by Martin Pool
 [merge] bzr.dev  | 
1253  | 
def get_physical_lock_status(self):  | 
1254  | 
return self.control_files.get_physical_lock_status()  | 
|
| 
1624.3.36
by Olaf Conradi
 Rename is_transport_locked() to get_physical_lock_status() as the  | 
1255  | 
|
| 
2018.5.75
by Andrew Bennetts
 Add Repository.{dont_,}leave_lock_in_place.  | 
1256  | 
def leave_lock_in_place(self):  | 
1257  | 
"""Tell this repository not to release the physical lock when this  | 
|
1258  | 
        object is unlocked.
 | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1259  | 
|
| 
2018.5.76
by Andrew Bennetts
 Testing that repository.{dont_,}leave_lock_in_place raises NotImplementedError if lock_write returns None.  | 
1260  | 
        If lock_write doesn't return a token, then this method is not supported.
 | 
| 
2018.5.75
by Andrew Bennetts
 Add Repository.{dont_,}leave_lock_in_place.  | 
1261  | 
        """
 | 
1262  | 
self.control_files.leave_in_place()  | 
|
1263  | 
||
1264  | 
def dont_leave_lock_in_place(self):  | 
|
1265  | 
"""Tell this repository to release the physical lock when this  | 
|
1266  | 
        object is unlocked, even if it didn't originally acquire it.
 | 
|
| 
2018.5.76
by Andrew Bennetts
 Testing that repository.{dont_,}leave_lock_in_place raises NotImplementedError if lock_write returns None.  | 
1267  | 
|
1268  | 
        If lock_write doesn't return a token, then this method is not supported.
 | 
|
| 
2018.5.75
by Andrew Bennetts
 Add Repository.{dont_,}leave_lock_in_place.  | 
1269  | 
        """
 | 
1270  | 
self.control_files.dont_leave_in_place()  | 
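An illustrative, hedged sketch of handing a physical lock between processes with the two methods above; it assumes a token-supporting format (lock_write() returns a non-None token) and an open Repository `repo`.

token = repo.lock_write()
if token is None:
    repo.unlock()                    # tokens unsupported; nothing to hand over
else:
    repo.leave_lock_in_place()
    repo.unlock()                    # the on-disk lock is still held
    # ... later, possibly from another process that was told the token ...
    repo.lock_write(token=token)
    repo.dont_leave_lock_in_place()
    repo.unlock()                    # the physical lock is released here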
|
1271  | 
||
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1272  | 
    @needs_read_lock
 | 
| 
2258.1.2
by Robert Collins
 New version of gather_stats which gathers aggregate data too.  | 
1273  | 
def gather_stats(self, revid=None, committers=None):  | 
| 
2258.1.1
by Robert Collins
 Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins).  | 
1274  | 
"""Gather statistics from a revision id.  | 
1275  | 
||
| 
2258.1.2
by Robert Collins
 New version of gather_stats which gathers aggregate data too.  | 
1276  | 
        :param revid: The revision id to gather statistics from, if None, then
 | 
1277  | 
            no revision specific statistics are gathered.
 | 
|
| 
2258.1.1
by Robert Collins
 Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins).  | 
1278  | 
        :param committers: Optional parameter controlling whether to grab
 | 
| 
2258.1.2
by Robert Collins
 New version of gather_stats which gathers aggregate data too.  | 
1279  | 
            a count of committers from the revision specific statistics.
 | 
| 
2258.1.1
by Robert Collins
 Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins).  | 
1280  | 
        :return: A dictionary of statistics. Currently this contains:
 | 
1281  | 
            committers: The number of committers if requested.
 | 
|
1282  | 
            firstrev: A tuple with timestamp, timezone for the penultimate left
 | 
|
1283  | 
                most ancestor of revid, if revid is not the NULL_REVISION.
 | 
|
1284  | 
            latestrev: A tuple with timestamp, timezone for revid, if revid is
 | 
|
1285  | 
                not the NULL_REVISION.
 | 
|
| 
2258.1.2
by Robert Collins
 New version of gather_stats which gathers aggregate data too.  | 
1286  | 
            revisions: The total revision count in the repository.
 | 
1287  | 
            size: An estimated disk size of the repository in bytes.
 | 
|
| 
2258.1.1
by Robert Collins
 Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins).  | 
1288  | 
        """
 | 
1289  | 
result = {}  | 
|
| 
2258.1.2
by Robert Collins
 New version of gather_stats which gathers aggregate data too.  | 
1290  | 
if revid and committers:  | 
| 
2258.1.1
by Robert Collins
 Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins).  | 
1291  | 
result['committers'] = 0  | 
| 
2258.1.2
by Robert Collins
 New version of gather_stats which gathers aggregate data too.  | 
1292  | 
if revid and revid != _mod_revision.NULL_REVISION:  | 
1293  | 
if committers:  | 
|
1294  | 
all_committers = set()  | 
|
1295  | 
revisions = self.get_ancestry(revid)  | 
|
1296  | 
            # pop the leading None
 | 
|
1297  | 
revisions.pop(0)  | 
|
1298  | 
first_revision = None  | 
|
1299  | 
if not committers:  | 
|
1300  | 
                # ignore the revisions in the middle - just grab first and last
 | 
|
1301  | 
revisions = revisions[0], revisions[-1]  | 
|
1302  | 
for revision in self.get_revisions(revisions):  | 
|
1303  | 
if not first_revision:  | 
|
1304  | 
first_revision = revision  | 
|
1305  | 
if committers:  | 
|
1306  | 
all_committers.add(revision.committer)  | 
|
1307  | 
last_revision = revision  | 
|
1308  | 
if committers:  | 
|
1309  | 
result['committers'] = len(all_committers)  | 
|
1310  | 
result['firstrev'] = (first_revision.timestamp,  | 
|
1311  | 
first_revision.timezone)  | 
|
1312  | 
result['latestrev'] = (last_revision.timestamp,  | 
|
1313  | 
last_revision.timezone)  | 
|
1314  | 
||
1315  | 
        # now gather global repository information
 | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1316  | 
        # XXX: This is available for many repos regardless of listability.
 | 
| 
2258.1.2
by Robert Collins
 New version of gather_stats which gathers aggregate data too.  | 
1317  | 
if self.bzrdir.root_transport.listable():  | 
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1318  | 
            # XXX: do we want to define __len__() ?
 | 
| 
3350.6.10
by Martin Pool
 VersionedFiles review cleanups  | 
1319  | 
            # Maybe the versionedfiles object should provide a different
 | 
1320  | 
            # method to get the number of keys.
 | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1321  | 
result['revisions'] = len(self.revisions.keys())  | 
1322  | 
            # result['size'] = t
 | 
|
| 
2258.1.1
by Robert Collins
 Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins).  | 
1323  | 
return result  | 
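A short sketch of consuming the statistics dictionary documented above; `repo` and `tip_revision_id` are assumed to come from the caller, and the @needs_read_lock decorator takes the read lock itself.

stats = repo.gather_stats(tip_revision_id, committers=True)
print 'committers:', stats.get('committers')
print 'revisions :', stats.get('revisions')
if 'firstrev' in stats:
    timestamp, timezone = stats['firstrev']
    print 'first commit timestamp:', timestamp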
1324  | 
||
| 
3140.1.2
by Aaron Bentley
 Add ability to find branches inside repositories  | 
1325  | 
def find_branches(self, using=False):  | 
1326  | 
"""Find branches underneath this repository.  | 
|
1327  | 
||
| 
3140.1.7
by Aaron Bentley
 Update docs  | 
1328  | 
        This will include branches inside other branches.
 | 
1329  | 
||
| 
3140.1.2
by Aaron Bentley
 Add ability to find branches inside repositories  | 
1330  | 
        :param using: If True, list only branches using this repository.
 | 
1331  | 
        """
 | 
|
| 
3140.1.9
by Aaron Bentley
 Optimize find_branches for standalone repositories  | 
1332  | 
if using and not self.is_shared():  | 
1333  | 
try:  | 
|
1334  | 
return [self.bzrdir.open_branch()]  | 
|
1335  | 
except errors.NotBranchError:  | 
|
1336  | 
return []  | 
|
| 
3140.1.2
by Aaron Bentley
 Add ability to find branches inside repositories  | 
1337  | 
class Evaluator(object):  | 
1338  | 
||
1339  | 
def __init__(self):  | 
|
1340  | 
self.first_call = True  | 
|
1341  | 
||
1342  | 
def __call__(self, bzrdir):  | 
|
1343  | 
                # On the first call, the parameter is always the bzrdir
 | 
|
1344  | 
                # containing the current repo.
 | 
|
1345  | 
if not self.first_call:  | 
|
1346  | 
try:  | 
|
1347  | 
repository = bzrdir.open_repository()  | 
|
1348  | 
except errors.NoRepositoryPresent:  | 
|
1349  | 
                        pass
 | 
|
1350  | 
else:  | 
|
1351  | 
return False, (None, repository)  | 
|
1352  | 
self.first_call = False  | 
|
1353  | 
try:  | 
|
1354  | 
value = (bzrdir.open_branch(), None)  | 
|
1355  | 
except errors.NotBranchError:  | 
|
1356  | 
value = (None, None)  | 
|
1357  | 
return True, value  | 
|
1358  | 
||
1359  | 
branches = []  | 
|
1360  | 
for branch, repository in bzrdir.BzrDir.find_bzrdirs(  | 
|
1361  | 
self.bzrdir.root_transport, evaluate=Evaluator()):  | 
|
1362  | 
if branch is not None:  | 
|
1363  | 
branches.append(branch)  | 
|
1364  | 
if not using and repository is not None:  | 
|
1365  | 
branches.extend(repository.find_branches())  | 
|
1366  | 
return branches  | 
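A usage sketch for find_branches (not from the annotated source); the repository path is only an example.

from bzrlib import repository

repo = repository.Repository.open('path/to/shared-repo')   # example path
for branch in repo.find_branches(using=True):
    print branch.base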
|
1367  | 
||
| 
2258.1.1
by Robert Collins
 Move info branch statistics gathering into the repository to allow smart server optimisation (Robert Collins).  | 
1368  | 
    @needs_read_lock
 | 
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
1369  | 
def search_missing_revision_ids(self, other, revision_id=None, find_ghosts=True):  | 
1370  | 
"""Return the revision ids that other has that this does not.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1371  | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
1372  | 
        These are returned in topological order.
 | 
1373  | 
||
1374  | 
        revision_id: only return revision ids included by revision_id.
 | 
|
1375  | 
        """
 | 
|
1376  | 
return InterRepository.get(other, self).search_missing_revision_ids(  | 
|
1377  | 
revision_id, find_ghosts)  | 
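A hedged sketch of comparing two repositories with the method above; `local_repo` and `other_repo` are assumed to be open, compatible repositories, and the result is assumed to expose revision ids via get_keys(), as the search results of this bzrlib era do.

result = local_repo.search_missing_revision_ids(other_repo, find_ghosts=False)
missing_ids = result.get_keys()      # assumption: result is a search result
for revision_id in missing_ids:
    print revision_id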
|
1378  | 
||
| 
1534.4.40
by Robert Collins
 Add RepositoryFormats and allow bzrdir.open or create _repository to be used.  | 
1379  | 
    @staticmethod
 | 
1380  | 
def open(base):  | 
|
1381  | 
"""Open the repository rooted at base.  | 
|
1382  | 
||
1383  | 
        For instance, if the repository is at URL/.bzr/repository,
 | 
|
1384  | 
        Repository.open(URL) -> a Repository instance.
 | 
|
1385  | 
        """
 | 
|
| 
1773.4.1
by Martin Pool
 Add pyflakes makefile target; fix many warnings  | 
1386  | 
control = bzrdir.BzrDir.open(base)  | 
| 
1534.4.40
by Robert Collins
 Add RepositoryFormats and allow bzrdir.open or create _repository to be used.  | 
1387  | 
return control.open_repository()  | 
1388  | 
||
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
1389  | 
def copy_content_into(self, destination, revision_id=None):  | 
| 
1534.6.6
by Robert Collins
 Move find_repository to bzrdir, its not quite ideal there but its simpler and until someone chooses to vary the search by branch type its completely sufficient.  | 
1390  | 
"""Make a complete copy of the content in self into destination.  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1391  | 
|
1392  | 
        This is a destructive operation! Do not use it on existing
 | 
|
| 
1534.6.6
by Robert Collins
 Move find_repository to bzrdir, its not quite ideal there but its simpler and until someone chooses to vary the search by branch type its completely sufficient.  | 
1393  | 
        repositories.
 | 
1394  | 
        """
 | 
|
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
1395  | 
return InterRepository.get(self, destination).copy_content(revision_id)  | 
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1396  | 
|
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1397  | 
def commit_write_group(self):  | 
1398  | 
"""Commit the contents accrued within the current write group.  | 
|
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1399  | 
|
1400  | 
        :seealso: start_write_group.
 | 
|
1401  | 
        """
 | 
|
1402  | 
if self._write_group is not self.get_transaction():  | 
|
1403  | 
            # has an unlock or relock occurred?
 | 
|
| 
2592.3.38
by Robert Collins
 All experimental format tests passing again.  | 
1404  | 
raise errors.BzrError('mismatched lock context %r and '  | 
1405  | 
'write group %r.' %  | 
|
1406  | 
(self.get_transaction(), self._write_group))  | 
|
| 
4431.3.7
by Jonathan Lange
 Cherrypick bzr.dev 4470, resolving conflicts.  | 
1407  | 
result = self._commit_write_group()  | 
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1408  | 
self._write_group = None  | 
| 
4431.3.7
by Jonathan Lange
 Cherrypick bzr.dev 4470, resolving conflicts.  | 
1409  | 
return result  | 
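A minimal lifecycle sketch tying lock_write, start_write_group and commit_write_group/abort_write_group together; `repo` is an open Repository and insert_data() is a hypothetical stand-in for real insertion calls.

repo.lock_write()
try:
    repo.start_write_group()
    try:
        insert_data(repo)            # hypothetical helper doing the insertion
    except:
        repo.abort_write_group()
        raise
    else:
        repo.commit_write_group()
finally:
    repo.unlock()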
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1410  | 
|
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1411  | 
def _commit_write_group(self):  | 
1412  | 
"""Template method for per-repository write group cleanup.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1413  | 
|
1414  | 
        This is called before the write group is considered to be
 | 
|
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1415  | 
        finished and should ensure that all data handed to the repository
 | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1416  | 
        for writing during the write group is safely committed (to the
 | 
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1417  | 
        extent possible considering file system caching etc).
 | 
1418  | 
        """
 | 
|
1419  | 
||
| 
4002.1.1
by Andrew Bennetts
 Implement suspend_write_group/resume_write_group.  | 
1420  | 
def suspend_write_group(self):  | 
1421  | 
raise errors.UnsuspendableWriteGroup(self)  | 
|
1422  | 
||
| 
4343.3.29
by John Arbash Meinel
 Add 'check_for_missing_texts' flag to get_missing_parent_inv..  | 
1423  | 
def get_missing_parent_inventories(self, check_for_missing_texts=True):  | 
| 
4257.4.6
by Andrew Bennetts
 Make get_missing_parent_inventories work for all repo formats (it's a no-op for unstackable formats).  | 
1424  | 
"""Return the keys of missing inventory parents for revisions added in  | 
1425  | 
        this write group.
 | 
|
1426  | 
||
1427  | 
        A revision is not complete if the inventory delta for that revision
 | 
|
1428  | 
        cannot be calculated.  Therefore if the parent inventories of a
 | 
|
1429  | 
        revision are not present, the revision is incomplete, and e.g. cannot
 | 
|
1430  | 
        be streamed by a smart server.  This method finds missing inventory
 | 
|
1431  | 
        parents for revisions added in this write group.
 | 
|
1432  | 
        """
 | 
|
1433  | 
if not self._format.supports_external_lookups:  | 
|
1434  | 
            # This is only an issue for stacked repositories
 | 
|
1435  | 
return set()  | 
|
| 
4257.4.10
by Andrew Bennetts
 Observe new revisions in _KnitGraphIndex.add_record rather than iterating all the uncommitted packs' indices.  | 
1436  | 
if not self.is_in_write_group():  | 
1437  | 
raise AssertionError('not in a write group')  | 
|
| 
4343.3.1
by John Arbash Meinel
 Set 'supports_external_lookups=True' for dev6 repositories.  | 
1438  | 
|
| 
4257.4.11
by Andrew Bennetts
 Polish the patch.  | 
1439  | 
        # XXX: We assume that every added revision already has its
 | 
1440  | 
        # corresponding inventory, so we only check for parent inventories that
 | 
|
1441  | 
        # might be missing, rather than all inventories.
 | 
|
1442  | 
parents = set(self.revisions._index.get_missing_parents())  | 
|
| 
4257.4.10
by Andrew Bennetts
 Observe new revisions in _KnitGraphIndex.add_record rather than iterating all the uncommitted packs' indices.  | 
1443  | 
parents.discard(_mod_revision.NULL_REVISION)  | 
| 
4257.4.7
by Andrew Bennetts
 Remove a little more cruft  | 
1444  | 
unstacked_inventories = self.inventories._index  | 
| 
4257.4.5
by Andrew Bennetts
 Refactor a little.  | 
1445  | 
present_inventories = unstacked_inventories.get_parent_map(  | 
| 
4257.4.10
by Andrew Bennetts
 Observe new revisions in _KnitGraphIndex.add_record rather than iterating all the uncommitted packs' indices.  | 
1446  | 
key[-1:] for key in parents)  | 
| 
4343.3.28
by John Arbash Meinel
 We only need to return the inventories we don't have.  | 
1447  | 
parents.difference_update(present_inventories)  | 
1448  | 
if len(parents) == 0:  | 
|
| 
4309.1.6
by Andrew Bennetts
 Exit get_missing_parent_inventories early (without checking texts) if there are no missing parent inventories.  | 
1449  | 
            # No missing parent inventories.
 | 
1450  | 
return set()  | 
|
| 
4343.3.29
by John Arbash Meinel
 Add 'check_for_missing_texts' flag to get_missing_parent_inv..  | 
1451  | 
if not check_for_missing_texts:  | 
1452  | 
return set(('inventories', rev_id) for (rev_id,) in parents)  | 
|
| 
4309.1.6
by Andrew Bennetts
 Exit get_missing_parent_inventories early (without checking texts) if there are no missing parent inventories.  | 
1453  | 
        # Ok, now we have a list of missing inventories.  But these only matter
 | 
| 
4309.1.2
by Andrew Bennetts
 Tentative fix for bug 368418: only fail the missing parent inventories check if there are missing texts that appear to be altered by the inventories with missing parents.  | 
1454  | 
        # if the inventories that reference them are missing some texts they
 | 
1455  | 
        # appear to introduce.
 | 
|
| 
4309.1.3
by Andrew Bennetts
 Start testing more cases, and start factoring those tests a little more clearly.  | 
1456  | 
        # XXX: Texts referenced by all added inventories need to be present,
 | 
| 
4309.1.5
by Andrew Bennetts
 Remove lots of cruft.  | 
1457  | 
        # but at the moment we're only checking for texts referenced by
 | 
1458  | 
        # inventories at the graph's edge.
 | 
|
| 
4309.1.6
by Andrew Bennetts
 Exit get_missing_parent_inventories early (without checking texts) if there are no missing parent inventories.  | 
1459  | 
key_deps = self.revisions._index._key_dependencies  | 
1460  | 
key_deps.add_keys(present_inventories)  | 
|
| 
4309.1.3
by Andrew Bennetts
 Start testing more cases, and start factoring those tests a little more clearly.  | 
1461  | 
referrers = frozenset(r[0] for r in key_deps.get_referrers())  | 
1462  | 
file_ids = self.fileids_altered_by_revision_ids(referrers)  | 
|
| 
4309.1.2
by Andrew Bennetts
 Tentative fix for bug 368418: only fail the missing parent inventories check if there are missing texts that appear to be altered by the inventories with missing parents.  | 
1463  | 
missing_texts = set()  | 
1464  | 
for file_id, version_ids in file_ids.iteritems():  | 
|
1465  | 
missing_texts.update(  | 
|
1466  | 
(file_id, version_id) for version_id in version_ids)  | 
|
1467  | 
present_texts = self.texts.get_parent_map(missing_texts)  | 
|
1468  | 
missing_texts.difference_update(present_texts)  | 
|
1469  | 
if not missing_texts:  | 
|
| 
4309.1.6
by Andrew Bennetts
 Exit get_missing_parent_inventories early (without checking texts) if there are no missing parent inventories.  | 
1470  | 
            # No texts are missing, so all revisions and their deltas are
 | 
| 
4309.1.2
by Andrew Bennetts
 Tentative fix for bug 368418: only fail the missing parent inventories check if there are missing texts that appear to be altered by the inventories with missing parents.  | 
1471  | 
            # reconstructable.
 | 
1472  | 
return set()  | 
|
| 
4309.1.5
by Andrew Bennetts
 Remove lots of cruft.  | 
1473  | 
        # Alternatively the text versions could be returned as the missing
 | 
| 
4309.1.3
by Andrew Bennetts
 Start testing more cases, and start factoring those tests a little more clearly.  | 
1474  | 
        # keys, but this is likely to be less data.
 | 
| 
4257.4.10
by Andrew Bennetts
 Observe new revisions in _KnitGraphIndex.add_record rather than iterating all the uncommitted packs' indices.  | 
1475  | 
missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)  | 
1476  | 
return missing_keys  | 
|
| 
4257.4.3
by Andrew Bennetts
 SinkStream.insert_stream checks for missing parent inventories, and reports them as missing_keys.  | 
1477  | 
|
| 
4145.1.2
by Robert Collins
 Add a refresh_data method on Repository allowing cleaner handling of insertions into RemoteRepository objects with _real_repository instances.  | 
1478  | 
def refresh_data(self):  | 
1479  | 
"""Re-read any data needed to to synchronise with disk.  | 
|
1480  | 
||
1481  | 
        This method is intended to be called after another repository instance
 | 
|
1482  | 
        (such as one used by a smart server) has inserted data into the
 | 
|
1483  | 
        repository. It may not be called during a write group, but may be
 | 
|
1484  | 
        called at any other time.
 | 
|
1485  | 
        """
 | 
|
1486  | 
if self.is_in_write_group():  | 
|
1487  | 
raise errors.InternalBzrError(  | 
|
1488  | 
"May not refresh_data while in a write group.")  | 
|
1489  | 
self._refresh_data()  | 
|
1490  | 
||
| 
4002.1.1
by Andrew Bennetts
 Implement suspend_write_group/resume_write_group.  | 
1491  | 
def resume_write_group(self, tokens):  | 
1492  | 
if not self.is_write_locked():  | 
|
1493  | 
raise errors.NotWriteLocked(self)  | 
|
1494  | 
if self._write_group:  | 
|
1495  | 
raise errors.BzrError('already in a write group')  | 
|
1496  | 
self._resume_write_group(tokens)  | 
|
1497  | 
        # so we can detect unlock/relock - the write group is now entered.
 | 
|
1498  | 
self._write_group = self.get_transaction()  | 
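A hedged sketch of suspending and resuming a write group; only some formats (pack-based ones) support this, and unsupported formats raise errors.UnsuspendableWriteGroup. `repo` is assumed to stay write-locked for the whole sequence.

from bzrlib import errors

repo.start_write_group()
try:
    tokens = repo.suspend_write_group()
except errors.UnsuspendableWriteGroup:
    repo.abort_write_group()
else:
    # ... later, still under the same write lock ...
    repo.resume_write_group(tokens)
    repo.commit_write_group()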
|
| 
4032.1.1
by John Arbash Meinel
 Merge the removal of all trailing whitespace, and resolve conflicts.  | 
1499  | 
|
| 
4002.1.1
by Andrew Bennetts
 Implement suspend_write_group/resume_write_group.  | 
1500  | 
def _resume_write_group(self, tokens):  | 
1501  | 
raise errors.UnsuspendableWriteGroup(self)  | 
|
1502  | 
||
| 
4070.9.2
by Andrew Bennetts
 Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations.  | 
1503  | 
def fetch(self, source, revision_id=None, pb=None, find_ghosts=False,  | 
1504  | 
fetch_spec=None):  | 
|
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1505  | 
"""Fetch the content required to construct revision_id from source.  | 
1506  | 
||
| 
4070.9.14
by Andrew Bennetts
 Tweaks requested by Robert's review.  | 
1507  | 
        If revision_id is None and fetch_spec is None, then all content is
 | 
1508  | 
        copied.
 | 
|
1509  | 
||
| 
4145.1.1
by Robert Collins
 Explicitly prevent fetching while the target repository is in a write group.  | 
1510  | 
        fetch() may not be used when the repository is in a write group -
 | 
1511  | 
        either finish the current write group before using fetch, or use
 | 
|
1512  | 
        fetch before starting the write group.
 | 
|
1513  | 
||
| 
2949.1.1
by Robert Collins
 Change Repository.fetch to provide a find_ghosts parameter which triggers ghost filling.  | 
1514  | 
        :param find_ghosts: Find and copy revisions in the source that are
 | 
1515  | 
            ghosts in the target (and not reachable directly by walking out to
 | 
|
1516  | 
            the first-present revision in target from revision_id).
 | 
|
| 
4070.9.14
by Andrew Bennetts
 Tweaks requested by Robert's review.  | 
1517  | 
        :param revision_id: If specified, all the content needed for this
 | 
1518  | 
            revision ID will be copied to the target.  Fetch will determine for
 | 
|
1519  | 
            itself which content needs to be copied.
 | 
|
1520  | 
        :param fetch_spec: If specified, a SearchResult or
 | 
|
1521  | 
            PendingAncestryResult that describes which revisions to copy.  This
 | 
|
1522  | 
            allows copying multiple heads at once.  Mutually exclusive with
 | 
|
1523  | 
            revision_id.
 | 
|
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1524  | 
        """
 | 
| 
4070.9.2
by Andrew Bennetts
 Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations.  | 
1525  | 
if fetch_spec is not None and revision_id is not None:  | 
1526  | 
raise AssertionError(  | 
|
1527  | 
"fetch_spec and revision_id are mutually exclusive.")  | 
|
| 
4145.1.1
by Robert Collins
 Explicitly prevent fetching while the target repository is in a write group.  | 
1528  | 
if self.is_in_write_group():  | 
| 
4145.1.3
by Robert Collins
 NEWS conflicts.  | 
1529  | 
raise errors.InternalBzrError(  | 
1530  | 
"May not fetch while in a write group.")  | 
|
| 
2592.3.115
by Robert Collins
 Move same repository check up to Repository.fetch to allow all fetch implementations to benefit.  | 
1531  | 
        # fast path same-url fetch operations
 | 
| 
4070.9.2
by Andrew Bennetts
 Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations.  | 
1532  | 
if self.has_same_location(source) and fetch_spec is None:  | 
| 
2592.3.115
by Robert Collins
 Move same repository check up to Repository.fetch to allow all fetch implementations to benefit.  | 
1533  | 
            # check that last_revision is in 'from' and then return a
 | 
1534  | 
            # no-operation.
 | 
|
1535  | 
if (revision_id is not None and  | 
|
1536  | 
not _mod_revision.is_null(revision_id)):  | 
|
1537  | 
self.get_revision(revision_id)  | 
|
1538  | 
return 0, []  | 
|
| 
3582.1.3
by Martin Pool
 Repository.fetch no longer needs to translate NotImplementedErro to IncompatibleRepositories  | 
1539  | 
        # if there is no specific appropriate InterRepository, this will get
 | 
1540  | 
        # the InterRepository base class, which raises an
 | 
|
1541  | 
        # IncompatibleRepositories when asked to fetch.
 | 
|
| 
2323.8.3
by Aaron Bentley
 Reduce scope of try/except, update NEWS  | 
1542  | 
inter = InterRepository.get(source, self)  | 
| 
3582.1.3
by Martin Pool
 Repository.fetch no longer needs to translate NotImplementedErro to IncompatibleRepositories  | 
1543  | 
return inter.fetch(revision_id=revision_id, pb=pb,  | 
| 
4070.9.2
by Andrew Bennetts
 Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations.  | 
1544  | 
find_ghosts=find_ghosts, fetch_spec=fetch_spec)  | 
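A small usage sketch for fetch; `source_repo`, `target_repo` and `tip_revision_id` are assumed to come from the caller, and the two repositories are assumed to be compatible.

target_repo.lock_write()
source_repo.lock_read()
try:
    # copy everything needed to reconstruct tip_revision_id into the target
    target_repo.fetch(source_repo, revision_id=tip_revision_id)
finally:
    source_repo.unlock()
    target_repo.unlock()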
| 
1534.4.41
by Robert Collins
 Branch now uses BzrDir reasonably sanely.  | 
1545  | 
|
| 
2520.4.54
by Aaron Bentley
 Hang a create_bundle method off repository  | 
1546  | 
def create_bundle(self, target, base, fileobj, format=None):  | 
1547  | 
return serializer.write_bundle(self, target, base, fileobj, format)  | 
|
1548  | 
||
| 
2803.2.1
by Robert Collins
 * CommitBuilder now advertises itself as requiring the root entry to be  | 
1549  | 
def get_commit_builder(self, branch, parents, config, timestamp=None,  | 
1550  | 
timezone=None, committer=None, revprops=None,  | 
|
| 
1740.3.7
by Jelmer Vernooij
 Move committer, log, revprops, timestamp and timezone to CommitBuilder.  | 
1551  | 
revision_id=None):  | 
1552  | 
"""Obtain a CommitBuilder for this repository.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1553  | 
|
| 
1740.3.7
by Jelmer Vernooij
 Move committer, log, revprops, timestamp and timezone to CommitBuilder.  | 
1554  | 
        :param branch: Branch to commit to.
 | 
1555  | 
        :param parents: Revision ids of the parents of the new revision.
 | 
|
1556  | 
        :param config: Configuration to use.
 | 
|
1557  | 
        :param timestamp: Optional timestamp recorded for commit.
 | 
|
1558  | 
        :param timezone: Optional timezone for timestamp.
 | 
|
1559  | 
        :param committer: Optional committer to set for commit.
 | 
|
1560  | 
        :param revprops: Optional dictionary of revision properties.
 | 
|
1561  | 
        :param revision_id: Optional revision id.
 | 
|
1562  | 
        """
 | 
|
| 
2818.3.2
by Robert Collins
 Review feedback.  | 
1563  | 
result = self._commit_builder_class(self, parents, config,  | 
| 
2592.3.135
by Robert Collins
 Do not create many transient knit objects, saving 4% on commit.  | 
1564  | 
timestamp, timezone, committer, revprops, revision_id)  | 
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1565  | 
self.start_write_group()  | 
1566  | 
return result  | 
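A hedged sketch of the builder lifecycle only; the record_* calls that actually populate the inventory are omitted, and `branch`, `parents`, `config` plus the builder methods used here (finish_inventory, commit, abort) are assumed from the same era of bzrlib. Because get_commit_builder() starts a write group, the builder must be committed or aborted.

builder = repo.get_commit_builder(branch, parents, config,
                                  committer='Jane Example <jane@example.com>')
try:
    # ... record the tree contents through the builder here (omitted) ...
    builder.finish_inventory()
    new_rev_id = builder.commit('example commit message')
except:
    builder.abort()          # also aborts the write group opened above
    raise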
|
| 
1740.3.1
by Jelmer Vernooij
 Introduce and use CommitBuilder objects.  | 
1567  | 
|
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1568  | 
def unlock(self):  | 
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1569  | 
if (self.control_files._lock_count == 1 and  | 
1570  | 
self.control_files._lock_mode == 'w'):  | 
|
1571  | 
if self._write_group is not None:  | 
|
| 
2592.3.244
by Martin Pool
 unlock while in a write group now aborts the write group, unlocks, and errors.  | 
1572  | 
self.abort_write_group()  | 
1573  | 
self.control_files.unlock()  | 
|
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1574  | 
raise errors.BzrError(  | 
1575  | 
'Must end write groups before releasing write locks.')  | 
|
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1576  | 
self.control_files.unlock()  | 
| 
3882.6.23
by John Arbash Meinel
 Change the XMLSerializer.read_inventory_from_string api.  | 
1577  | 
if self.control_files._lock_count == 0:  | 
1578  | 
self._inventory_entry_cache.clear()  | 
|
| 
4379.2.1
by John Arbash Meinel
 Change the fallback repository code to only lock/unlock on transition.  | 
1579  | 
for repo in self._fallback_repositories:  | 
1580  | 
repo.unlock()  | 
|
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1581  | 
|
| 
1185.65.27
by Robert Collins
 Tweak storage towards mergability.  | 
1582  | 
    @needs_read_lock
 | 
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
1583  | 
def clone(self, a_bzrdir, revision_id=None):  | 
| 
1534.4.41
by Robert Collins
 Branch now uses BzrDir reasonably sanely.  | 
1584  | 
"""Clone this repository into a_bzrdir using the current format.  | 
1585  | 
||
1586  | 
        Currently no check is made that the format of this repository and
 | 
|
1587  | 
        the bzrdir format are compatible. FIXME RBC 20060201.
 | 
|
| 
2241.1.4
by Martin Pool
 Moved old weave-based repository formats into bzrlib.repofmt.weaverepo.  | 
1588  | 
|
1589  | 
        :return: The newly created destination repository.
 | 
|
| 
1534.4.41
by Robert Collins
 Branch now uses BzrDir reasonably sanely.  | 
1590  | 
        """
 | 
| 
2440.1.1
by Martin Pool
 Add new Repository.sprout,  | 
1591  | 
        # TODO: deprecate after 0.16; cloning this with all its settings is
 | 
1592  | 
        # probably not very useful -- mbp 20070423
 | 
|
1593  | 
dest_repo = self._create_sprouting_repo(a_bzrdir, shared=self.is_shared())  | 
|
1594  | 
self.copy_content_into(dest_repo, revision_id)  | 
|
1595  | 
return dest_repo  | 
|
1596  | 
||
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1597  | 
def start_write_group(self):  | 
1598  | 
"""Start a write group in the repository.  | 
|
1599  | 
||
1600  | 
        Write groups are used by repositories which do not have a 1:1 mapping
 | 
|
1601  | 
        between file ids and backend store to manage the insertion of data from
 | 
|
1602  | 
        both fetch and commit operations.
 | 
|
1603  | 
||
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1604  | 
        A write lock is required around the start_write_group/commit_write_group
 | 
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1605  | 
        for the support of lock-requiring repository formats.
 | 
| 
2617.6.8
by Robert Collins
 Review feedback and documentation.  | 
1606  | 
|
1607  | 
        One can only insert data into a repository inside a write group.
 | 
|
1608  | 
||
| 
2617.6.6
by Robert Collins
 Some review feedback.  | 
1609  | 
        :return: None.
 | 
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1610  | 
        """
 | 
| 
2592.3.188
by Robert Collins
 Allow pack repositories to have multiple writers active at one time, for greater concurrency.  | 
1611  | 
if not self.is_write_locked():  | 
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1612  | 
raise errors.NotWriteLocked(self)  | 
1613  | 
if self._write_group:  | 
|
1614  | 
raise errors.BzrError('already in a write group')  | 
|
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1615  | 
self._start_write_group()  | 
1616  | 
        # so we can detect unlock/relock - the write group is now entered.
 | 
|
| 
2617.6.1
by Robert Collins
 * New method on Repository - ``start_write_group``, ``end_write_group``  | 
1617  | 
self._write_group = self.get_transaction()  | 
1618  | 
||
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1619  | 
def _start_write_group(self):  | 
1620  | 
"""Template method for per-repository write group startup.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1621  | 
|
1622  | 
        This is called before the write group is considered to be
 | 
|
| 
2617.6.2
by Robert Collins
 Add abort_write_group and wire write_groups into fetch and commit.  | 
1623  | 
        entered.
 | 
1624  | 
        """
 | 
|
1625  | 
||
| 
2440.1.1
by Martin Pool
 Add new Repository.sprout,  | 
1626  | 
    @needs_read_lock
 | 
1627  | 
def sprout(self, to_bzrdir, revision_id=None):  | 
|
1628  | 
"""Create a descendent repository for new development.  | 
|
1629  | 
||
1630  | 
        Unlike clone, this does not copy the settings of the repository.
 | 
|
1631  | 
        """
 | 
|
1632  | 
dest_repo = self._create_sprouting_repo(to_bzrdir, shared=False)  | 
|
1633  | 
dest_repo.fetch(self, revision_id=revision_id)  | 
|
1634  | 
return dest_repo  | 
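A short sketch contrasting clone and sprout; `source_repo` is an open Repository and the target paths are illustrative. Both methods take their own read lock via @needs_read_lock.

from bzrlib import bzrdir

clone_dir = bzrdir.BzrDir.create('clone-target')
cloned_repo = source_repo.clone(clone_dir)       # copies settings such as shared-ness
sprout_dir = bzrdir.BzrDir.create('sprout-target')
sprouted_repo = source_repo.sprout(sprout_dir)   # fresh settings, content fetched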
|
1635  | 
||
1636  | 
def _create_sprouting_repo(self, a_bzrdir, shared):  | 
|
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1637  | 
if not isinstance(a_bzrdir._format, self.bzrdir._format.__class__):  | 
1638  | 
            # use target default format.
 | 
|
| 
2241.1.4
by Martin Pool
 Moved old weave-based repository formats into bzrlib.repofmt.weaverepo.  | 
1639  | 
dest_repo = a_bzrdir.create_repository()  | 
| 
1534.4.50
by Robert Collins
 Got the bzrdir api straightened out, plenty of refactoring to use it pending, but the api is up and running.  | 
1640  | 
else:  | 
| 
2241.1.4
by Martin Pool
 Moved old weave-based repository formats into bzrlib.repofmt.weaverepo.  | 
1641  | 
            # Most control formats need the repository to be specifically
 | 
1642  | 
            # created, but on some old all-in-one formats it's not needed
 | 
|
1643  | 
try:  | 
|
| 
2440.1.1
by Martin Pool
 Add new Repository.sprout,  | 
1644  | 
dest_repo = self._format.initialize(a_bzrdir, shared=shared)  | 
| 
2241.1.4
by Martin Pool
 Moved old weave-based repository formats into bzrlib.repofmt.weaverepo.  | 
1645  | 
except errors.UninitializableFormat:  | 
1646  | 
dest_repo = a_bzrdir.open_repository()  | 
|
1647  | 
return dest_repo  | 
|
| 
1534.4.41
by Robert Collins
 Branch now uses BzrDir reasonably sanely.  | 
1648  | 
|
| 
4022.1.1
by Robert Collins
 Refactoring of fetch to have a sender and sink component enabling splitting the logic over a network stream. (Robert Collins, Andrew Bennetts)  | 
1649  | 
def _get_sink(self):  | 
1650  | 
"""Return a sink for streaming into this repository."""  | 
|
1651  | 
return StreamSink(self)  | 
|
1652  | 
||
| 
4060.1.3
by Robert Collins
 Implement the separate source component for fetch - repository.StreamSource.  | 
1653  | 
def _get_source(self, to_format):  | 
1654  | 
"""Return a source for streaming from this repository."""  | 
|
1655  | 
return StreamSource(self, to_format)  | 
|
1656  | 
||
| 
1563.2.22
by Robert Collins
 Move responsibility for repository.has_revision into RevisionStore  | 
1657  | 
    @needs_read_lock
 | 
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1658  | 
def has_revision(self, revision_id):  | 
| 
1563.2.22
by Robert Collins
 Move responsibility for repository.has_revision into RevisionStore  | 
1659  | 
"""True if this repository has a copy of the revision."""  | 
| 
3172.3.1
by Robert Collins
 Repository has a new method ``has_revisions`` which signals the presence  | 
1660  | 
return revision_id in self.has_revisions((revision_id,))  | 
1661  | 
||
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1662  | 
    @needs_read_lock
 | 
| 
3172.3.1
by Robert Collins
 Repository has a new method ``has_revisions`` which signals the presence  | 
1663  | 
def has_revisions(self, revision_ids):  | 
1664  | 
"""Probe to find out the presence of multiple revisions.  | 
|
1665  | 
||
1666  | 
        :param revision_ids: An iterable of revision_ids.
 | 
|
1667  | 
        :return: A set of the revision_ids that were present.
 | 
|
1668  | 
        """
 | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1669  | 
parent_map = self.revisions.get_parent_map(  | 
1670  | 
[(rev_id,) for rev_id in revision_ids])  | 
|
1671  | 
result = set()  | 
|
1672  | 
if _mod_revision.NULL_REVISION in revision_ids:  | 
|
1673  | 
result.add(_mod_revision.NULL_REVISION)  | 
|
1674  | 
result.update([key[0] for key in parent_map])  | 
|
1675  | 
return result  | 
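A quick sketch of probing for several revisions at once; the revision ids are placeholders and `repo` is an open Repository.

wanted = ['rev-id-1', 'rev-id-2', 'rev-id-3']
present = repo.has_revisions(wanted)
for revision_id in wanted:
    print revision_id, revision_id in present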
|
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1676  | 
|
| 
1185.65.27
by Robert Collins
 Tweak storage towards mergability.  | 
1677  | 
    @needs_read_lock
 | 
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
1678  | 
def get_revision(self, revision_id):  | 
1679  | 
"""Return the Revision object for a named revision."""  | 
|
1680  | 
return self.get_revisions([revision_id])[0]  | 
|
1681  | 
||
1682  | 
    @needs_read_lock
 | 
|
| 
1570.1.13
by Robert Collins
 Check for incorrect revision parentage in the weave during revision access.  | 
1683  | 
def get_revision_reconcile(self, revision_id):  | 
1684  | 
"""'reconcile' helper routine that allows access to a revision always.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1685  | 
|
| 
1570.1.13
by Robert Collins
 Check for incorrect revision parentage in the weave during revision access.  | 
1686  | 
        This variant of get_revision does not cross check the weave graph
 | 
1687  | 
        against the revision one as get_revision does: but it should only
 | 
|
1688  | 
        be used by reconcile, or reconcile-alike commands that are correcting
 | 
|
1689  | 
        or testing the revision graph.
 | 
|
1690  | 
        """
 | 
|
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
1691  | 
return self._get_revisions([revision_id])[0]  | 
| 
2249.5.13
by John Arbash Meinel
 Finish auditing Repository, and fix generate_ids to always generate utf8 ids.  | 
1692  | 
|
| 
1756.1.2
by Aaron Bentley
 Show logs using get_revisions  | 
1693  | 
    @needs_read_lock
 | 
1694  | 
def get_revisions(self, revision_ids):  | 
|
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
1695  | 
"""Get many revisions at once."""  | 
1696  | 
return self._get_revisions(revision_ids)  | 
|
1697  | 
||
1698  | 
    @needs_read_lock
 | 
|
1699  | 
def _get_revisions(self, revision_ids):  | 
|
1700  | 
"""Core work logic to get many revisions without sanity checks."""  | 
|
1701  | 
for rev_id in revision_ids:  | 
|
1702  | 
if not rev_id or not isinstance(rev_id, basestring):  | 
|
1703  | 
raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)  | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1704  | 
keys = [(key,) for key in revision_ids]  | 
1705  | 
stream = self.revisions.get_record_stream(keys, 'unordered', True)  | 
|
1706  | 
revs = {}  | 
|
1707  | 
for record in stream:  | 
|
1708  | 
if record.storage_kind == 'absent':  | 
|
1709  | 
raise errors.NoSuchRevision(self, record.key[0])  | 
|
1710  | 
text = record.get_bytes_as('fulltext')  | 
|
1711  | 
rev = self._serializer.read_revision_from_string(text)  | 
|
1712  | 
revs[record.key[0]] = rev  | 
|
1713  | 
return [revs[revid] for revid in revision_ids]  | 
|
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1714  | 
|
| 
1185.65.27
by Robert Collins
 Tweak storage towards mergability.  | 
1715  | 
    @needs_read_lock
 | 
| 
1563.2.29
by Robert Collins
 Remove all but fetch references to repository.revision_store.  | 
1716  | 
def get_revision_xml(self, revision_id):  | 
| 
4232.2.1
by Vincent Ladeuil
 Stop-gap fix for Repository.get_revision_xml.  | 
1717  | 
        # TODO: jam 20070210 This shouldn't be necessary since get_revision
 | 
1718  | 
        #       would have already done it.
 | 
|
1719  | 
        # TODO: jam 20070210 Just use _serializer.write_revision_to_string()
 | 
|
1720  | 
        # TODO: this can't just be replaced by:
 | 
|
1721  | 
        # return self._serializer.write_revision_to_string(
 | 
|
1722  | 
        #     self.get_revision(revision_id))
 | 
|
1723  | 
        # as cStringIO preserves the encoding unlike write_revision_to_string
 | 
|
1724  | 
        # or some other call down the path.
 | 
|
1725  | 
rev = self.get_revision(revision_id)  | 
|
1726  | 
rev_tmp = cStringIO.StringIO()  | 
|
1727  | 
        # the current serializer..
 | 
|
1728  | 
self._serializer.write_revision(rev, rev_tmp)  | 
|
1729  | 
rev_tmp.seek(0)  | 
|
1730  | 
return rev_tmp.getvalue()  | 
|
| 
1563.2.29
by Robert Collins
 Remove all but fetch references to repository.revision_store.  | 
1731  | 
|
| 
4137.3.2
by Ian Clatworthy
 Repository.get_deltas_for_revisions() now supports file-id filtering  | 
1732  | 
def get_deltas_for_revisions(self, revisions, specific_fileids=None):  | 
| 
1756.3.19
by Aaron Bentley
 Documentation and cleanups  | 
1733  | 
"""Produce a generator of revision deltas.  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
1734  | 
|
| 
1756.3.19
by Aaron Bentley
 Documentation and cleanups  | 
1735  | 
        Note that the input is a sequence of REVISIONS, not revision_ids.
 | 
1736  | 
        Trees will be held in memory until the generator exits.
 | 
|
1737  | 
        Each delta is relative to the revision's lefthand predecessor.
 | 
|
| 
4137.3.2
by Ian Clatworthy
 Repository.get_deltas_for_revisions() now supports file-id filtering  | 
1738  | 
|
1739  | 
        :param specific_fileids: if not None, the result is filtered
 | 
|
1740  | 
          so that only those file-ids, their parents and their
 | 
|
1741  | 
          children are included.
 | 
|
| 
1756.3.19
by Aaron Bentley
 Documentation and cleanups  | 
1742  | 
        """
 | 
| 
4137.3.2
by Ian Clatworthy
 Repository.get_deltas_for_revisions() now supports file-id filtering  | 
1743  | 
        # Get the revision-ids of interest
 | 
| 
1756.3.3
by Aaron Bentley
 More refactoring, introduce revision_trees.  | 
1744  | 
required_trees = set()  | 
1745  | 
for revision in revisions:  | 
|
1746  | 
required_trees.add(revision.revision_id)  | 
|
1747  | 
required_trees.update(revision.parent_ids[:1])  | 
|
| 
4137.3.2
by Ian Clatworthy
 Repository.get_deltas_for_revisions() now supports file-id filtering  | 
1748  | 
|
1749  | 
        # Get the matching filtered trees. Note that it's more
 | 
|
1750  | 
        # efficient to pass filtered trees to changes_from() rather
 | 
|
1751  | 
        # than doing the filtering afterwards. changes_from() could
 | 
|
1752  | 
        # arguably do the filtering itself but it's path-based, not
 | 
|
1753  | 
        # file-id based, so filtering before or afterwards is
 | 
|
1754  | 
        # currently easier.
 | 
|
1755  | 
if specific_fileids is None:  | 
|
1756  | 
trees = dict((t.get_revision_id(), t) for  | 
|
1757  | 
t in self.revision_trees(required_trees))  | 
|
1758  | 
else:  | 
|
1759  | 
trees = dict((t.get_revision_id(), t) for  | 
|
1760  | 
t in self._filtered_revision_trees(required_trees,  | 
|
1761  | 
specific_fileids))  | 
|
1762  | 
||
1763  | 
        # Calculate the deltas
 | 
|
| 
1756.3.3
by Aaron Bentley
 More refactoring, introduce revision_trees.  | 
1764  | 
for revision in revisions:  | 
1765  | 
if not revision.parent_ids:  | 
|
| 
3668.5.1
by Jelmer Vernooij
 Use NULL_REVISION rather than None for Repository.revision_tree().  | 
1766  | 
old_tree = self.revision_tree(_mod_revision.NULL_REVISION)  | 
| 
1756.3.3
by Aaron Bentley
 More refactoring, introduce revision_trees.  | 
1767  | 
else:  | 
1768  | 
old_tree = trees[revision.parent_ids[0]]  | 
|
| 
1852.10.3
by Robert Collins
 Remove all uses of compare_trees and replace with Tree.changes_from throughout bzrlib.  | 
1769  | 
yield trees[revision.revision_id].changes_from(old_tree)  | 
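A usage sketch for the delta generator above; the revision ids are placeholders and `repo` is an open Repository.

repo.lock_read()
try:
    revs = repo.get_revisions(['rev-id-1', 'rev-id-2'])
    for rev, delta in zip(revs, repo.get_deltas_for_revisions(revs)):
        print rev.revision_id, len(delta.added), 'added,', len(delta.modified), 'modified'
finally:
    repo.unlock()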
| 
1756.3.3
by Aaron Bentley
 More refactoring, introduce revision_trees.  | 
1770  | 
|
| 
1756.3.19
by Aaron Bentley
 Documentation and cleanups  | 
1771  | 
    @needs_read_lock
 | 
| 
4137.3.2
by Ian Clatworthy
 Repository.get_deltas_for_revisions() now supports file-id filtering  | 
1772  | 
def get_revision_delta(self, revision_id, specific_fileids=None):  | 
| 
1744.2.2
by Johan Rydberg
 Add get_revision_delta to Repository; and make Branch.get_revision_delta use it.  | 
1773  | 
"""Return the delta for one revision.  | 
1774  | 
||
1775  | 
        The delta is relative to the left-hand predecessor of the
 | 
|
1776  | 
        revision.
 | 
|
| 
4137.3.2
by Ian Clatworthy
 Repository.get_deltas_for_revisions() now supports file-id filtering  | 
1777  | 
|
1778  | 
        :param specific_fileids: if not None, the result is filtered
 | 
|
1779  | 
          so that only those file-ids, their parents and their
 | 
|
1780  | 
          children are included.
 | 
|
| 
1744.2.2
by Johan Rydberg
 Add get_revision_delta to Repository; and make Branch.get_revision_delta use it.  | 
1781  | 
        """
 | 
| 
1756.3.3
by Aaron Bentley
 More refactoring, introduce revision_trees.  | 
1782  | 
r = self.get_revision(revision_id)  | 
| 
4137.3.2
by Ian Clatworthy
 Repository.get_deltas_for_revisions() now supports file-id filtering  | 
1783  | 
return list(self.get_deltas_for_revisions([r],  | 
1784  | 
specific_fileids=specific_fileids))[0]  | 
|
| 
1744.2.2
by Johan Rydberg
 Add get_revision_delta to Repository; and make Branch.get_revision_delta use it.  | 
1785  | 
|
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1786  | 
    @needs_write_lock
 | 
1787  | 
def store_revision_signature(self, gpg_strategy, plaintext, revision_id):  | 
|
| 
1563.2.29
by Robert Collins
 Remove all but fetch references to repository.revision_store.  | 
1788  | 
signature = gpg_strategy.sign(plaintext)  | 
| 
2996.2.4
by Aaron Bentley
 Rename function to add_signature_text  | 
1789  | 
self.add_signature_text(revision_id, signature)  | 
| 
2996.2.3
by Aaron Bentley
 Add tests for install_revisions and add_signature  | 
1790  | 
|
1791  | 
    @needs_write_lock
 | 
|
| 
2996.2.4
by Aaron Bentley
 Rename function to add_signature_text  | 
1792  | 
def add_signature_text(self, revision_id, signature):  | 
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1793  | 
self.signatures.add_lines((revision_id,), (),  | 
1794  | 
osutils.split_lines(signature))  | 
|
| 
1185.65.1
by Aaron Bentley
 Refactored out ControlFiles and RevisionStore from _Branch  | 
1795  | 
|
| 
2988.1.2
by Robert Collins
 New Repository API find_text_key_references for use by reconcile and check.  | 
1796  | 
def find_text_key_references(self):  | 
1797  | 
"""Find the text key references within the repository.  | 
|
1798  | 
||
1799  | 
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
 | 
|
1800  | 
            to whether they were referred to by the inventory of the
 | 
|
1801  | 
            revision_id that they contain. The inventory texts from all present
 | 
|
1802  | 
            revision ids are assessed to generate this report.
 | 
|
1803  | 
        """
 | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1804  | 
revision_keys = self.revisions.keys()  | 
1805  | 
w = self.inventories  | 
|
| 
2988.1.2
by Robert Collins
 New Repository API find_text_key_references for use by reconcile and check.  | 
1806  | 
pb = ui.ui_factory.nested_progress_bar()  | 
1807  | 
try:  | 
|
1808  | 
return self._find_text_key_references_from_xml_inventory_lines(  | 
|
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
1809  | 
w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))  | 
| 
2988.1.2
by Robert Collins
 New Repository API find_text_key_references for use by reconcile and check.  | 
1810  | 
finally:  | 
1811  | 
pb.finished()  | 
|
1812  | 
||
| 
2988.1.1
by Robert Collins
 Refactor fetch's xml inventory parsing into a core routine that extracts the data and a separate one that filters for fetch.  | 
1813  | 
def _find_text_key_references_from_xml_inventory_lines(self,  | 
1814  | 
line_iterator):  | 
|
1815  | 
"""Core routine for extracting references to texts from inventories.  | 
|
| 
2592.3.110
by Robert Collins
 Filter out texts and signatures not referenced by the revisions being copied during pack to pack fetching.  | 
1816  | 
|
1817  | 
        This performs the translation of xml lines to revision ids.
 | 
|
1818  | 
||
| 
2975.3.1
by Robert Collins
        :param line_iterator: An iterator of lines, origin_version_id
        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. Note that if that revision_id was
            not part of the line_iterator's output then False will be given -
            even though it may actually refer to that key.
        """
        if not self._serializer.support_altered_by_hack:
            raise AssertionError(
                "_find_text_key_references_from_xml_inventory_lines only "
                "supported for branches which store inventory as unnested xml"
                ", not on %r" % self)
        result = {}

        # this code needs to read every new line in every inventory for the
        # inventories [revision_ids]. Seeing a line twice is ok. Seeing a line
        # not present in one of those inventories is unnecessary but not
        # harmful because we are filtering by the revision id marker in the
        # inventory lines : we only select file ids altered in one of those
        # revisions. We don't need to see all lines in the inventory because
        # only those added in an inventory in rev X can contain a revision=X
        # line.
        unescape_revid_cache = {}
        unescape_fileid_cache = {}

        # jam 20061218 In a big fetch, this handles hundreds of thousands
        # of lines, so it has had a lot of inlining and optimizing done.
        # Sorry that it is a little bit messy.
        # Move several functions to be local variables, since this is a long
        # running loop.
        search = self._file_ids_altered_regex.search
        unescape = _unescape_xml
        setdefault = result.setdefault
        for line, line_key in line_iterator:
            match = search(line)
            if match is None:
                continue
            # One call to match.group() returning multiple items is quite a
            # bit faster than 2 calls to match.group() each returning 1
            file_id, revision_id = match.group('file_id', 'revision_id')

            # Inlining the cache lookups helps a lot when you make 170,000
            # lines and 350k ids, versus 8.4 unique ids.
            # Using a cache helps in 2 ways:
            #   1) Avoids unnecessary decoding calls
            #   2) Re-uses cached strings, which helps in future set and
            #      equality checks.
            # (2) is enough that removing encoding entirely along with
            # the cache (so we are using plain strings) results in no
            # performance improvement.
            try:
                revision_id = unescape_revid_cache[revision_id]
            except KeyError:
                unescaped = unescape(revision_id)
                unescape_revid_cache[revision_id] = unescaped
                revision_id = unescaped

            # Note that unconditionally unescaping means that we deserialise
            # every fileid, which for general 'pull' is not great, but we don't
            # really want to have so many fulltexts that this matters anyway.
            # RBC 20071114.
            try:
                file_id = unescape_fileid_cache[file_id]
            except KeyError:
                unescaped = unescape(file_id)
                unescape_fileid_cache[file_id] = unescaped
                file_id = unescaped

            key = (file_id, revision_id)
            setdefault(key, False)
            if revision_id == line_key[-1]:
                result[key] = True
        return result

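    # Illustrative note (not part of the original source): the mapping built
    # above uses text keys as dictionary keys and booleans as values. For
    # example, assuming the inventory of 'rev-2' introduced a new text of
    # 'file-1' while 'rev-1' was not in the supplied line_iterator:
    #
    #   {('file-1', 'rev-2'): True,
    #    ('file-1', 'rev-1'): False}
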
    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in revision_keys are ignored.

        :param revision_keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by revision_keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of lines, origin_version_id
        :param revision_keys: The revision keys to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids
            that altered it listed explicitly.
        """
        seen = set(self._find_text_key_references_from_xml_inventory_lines(
                line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._find_text_key_references_from_xml_inventory_lines(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def _find_parent_ids_of_revisions(self, revision_ids):
        """Find all parent ids that are mentioned in the revision graph.

        :return: set of revisions that are parents of revision_ids which are
            not part of revision_ids themselves
        """
        parent_map = self.get_parent_map(revision_ids)
        parent_ids = set()
        map(parent_ids.update, parent_map.itervalues())
        parent_ids.difference_update(revision_ids)
        parent_ids.discard(_mod_revision.NULL_REVISION)
        return parent_ids

    def _find_parent_keys_of_revisions(self, revision_keys):
        """Similar to _find_parent_ids_of_revisions, but used with keys.

        :param revision_keys: An iterable of revision_keys.
        :return: The parents of all revision_keys that are not already in
            revision_keys
        """
        parent_map = self.revisions.get_parent_map(revision_keys)
        parent_keys = set()
        map(parent_keys.update, parent_map.itervalues())
        parent_keys.difference_update(revision_keys)
        parent_keys.discard(_mod_revision.NULL_REVISION)
        return parent_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revision_ids: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids
            that altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._find_file_ids_from_xml_inventory_lines(
                w.iter_lines_added_or_present_in_keys(
                    selected_keys, pb=pb),
                selected_keys)
        finally:
            pb.finished()

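    # Hypothetical usage sketch (not part of the original source), assuming
    # `repo` is a read-locked Repository and `rev_id` names a revision it
    # contains:
    #
    #   altered = repo.fileids_altered_by_revision_ids([rev_id])
    #   for file_id, revision_ids in sorted(altered.iteritems()):
    #       print file_id, sorted(revision_ids)
    #
    # Each value is the exact subset of the supplied revisions that altered
    # the corresponding file-id.
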
    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key, self)
            yield text_keys[record.key], record.get_bytes_as('chunked')

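    # Hypothetical usage sketch (not part of the original source): fetch two
    # file texts and rebuild full strings from the chunked iterables. The
    # file-ids and revision-ids shown are placeholders:
    #
    #   wanted = [('file-1', 'rev-1', 'a'), ('file-2', 'rev-1', 'b')]
    #   for identifier, bytes_iterator in repo.iter_files_bytes(wanted):
    #       text = ''.join(bytes_iterator)
    #
    # The identifiers 'a' and 'b' come back unchanged, so the caller can match
    # results to its own bookkeeping regardless of yield order.
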
    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
                text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10 # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update("Calculating text parents", processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for rev_tree in self.revision_trees(to_query):
                revision_id = rev_tree.get_revision_id()
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update("Calculating text parents", processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x:candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents

        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

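    # Illustrative note (not part of the original source): the returned index
    # maps each text key to its parent text keys, for example:
    #
    #   {('file-1', 'rev-2'): [('file-1', 'rev-1')],
    #    ('file-1', 'rev-1'): [NULL_REVISION]}
    #
    # Text keys recorded in invalid_keys are given [NULL_REVISION] as their
    # parent list, as the final loop above shows.
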
    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

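    # Hypothetical usage sketch (not part of the original source): a
    # fetch-like caller can simply process the keys in the order they are
    # yielded, assuming `revision_ids` lists revisions present in `repo`:
    #
    #   for knit_kind, file_id, versions in repo.item_keys_introduced_by(
    #           revision_ids):
    #       if knit_kind == 'file':
    #           pass  # copy the texts of `file_id` named by `versions`
    #       else:
    #           pass  # 'inventory', 'signatures' or 'revisions' data
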
    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator.  Ideally the caching would be done in fetch.py I think.  Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update("fetch texts", count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = set(
            [r for (r,) in revisions_with_signatures])
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        return self._iter_inventories(revision_ids)

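    # Hypothetical usage sketch (not part of the original source), assuming
    # `repo` is read-locked and both revision ids exist:
    #
    #   for inv in repo.iter_inventories(['rev-1', 'rev-2']):
    #       print inv.revision_id
    #
    # get_inventory() above is just the single-item convenience wrapper over
    # this iterator.
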
    def _iter_inventories(self, revision_ids):
        """single-document based inventory iteration."""
        for text, revision_id in self._iter_inventory_xmls(revision_ids):
            yield self.deserialise_inventory(revision_id, text)

    def _iter_inventory_xmls(self, revision_ids):
        keys = [(revision_id,) for revision_id in revision_ids]
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                text_chunks[record.key] = record.get_bytes_as('chunked')
            else:
                raise errors.NoSuchRevision(self, record.key)
        for key in keys:
            chunks = text_chunks.pop(key)
            yield ''.join(chunks), key[-1]

    def deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def serialise_inventory(self, inv):
        return self._serializer.write_inventory_to_string(inv)

    def _serialise_inventory_to_lines(self, inv):
        return self._serializer.write_inventory_to_lines(inv)

    def get_serializer_format(self):
        return self._serializer.format_num

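    # Hypothetical usage sketch (not part of the original source): the
    # serializer helpers above are inverses at the inventory level, so for an
    # existing revision the XML can be round-tripped (byte-for-byte equality
    # of the two XML strings is not guaranteed):
    #
    #   xml = repo.get_inventory_xml('rev-1')
    #   inv = repo.deserialise_inventory('rev-1', xml)
    #   xml_again = repo.serialise_inventory(inv)
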
    @needs_read_lock
    def get_inventory_xml(self, revision_id):
        """Get inventory XML as a file object."""
        texts = self._iter_inventory_xmls([revision_id])
        try:
            text, revision_id = texts.next()
        except StopIteration:
            raise errors.HistoryMissing(self, 'inventory', revision_id)
        return text

    @needs_read_lock
    def get_inventory_sha1(self, revision_id):
        """Return the sha1 hash of the inventory entry
        """
        return self.get_revision(revision_id).inventory_sha1

    def get_rev_id_for_revno(self, revno, known_pair):
        """Return the revision id of a revno, given a later (revno, revid)
        pair in the same history.

        :return: if found (True, revid).  If the available history ran out
            before reaching the revno, then this returns
            (False, (closest_revno, closest_revid)).
        """
        known_revno, known_revid = known_pair
        partial_history = [known_revid]
        distance_from_known = known_revno - revno
        if distance_from_known < 0:
            raise ValueError(
                'requested revno (%d) is later than given known revno (%d)'
                % (revno, known_revno))
        try:
            _iter_for_revno(
                self, partial_history, stop_index=distance_from_known)
        except errors.RevisionNotPresent, err:
            if err.revision_id == known_revid:
                # The start revision (known_revid) wasn't found.
                raise
            # This is a stacked repository with no fallbacks, or there's a
            # left-hand ghost.  Either way, even though the revision named in
            # the error isn't in this repo, we know it's the next step in this
            # left-hand history.
            partial_history.append(err.revision_id)
        if len(partial_history) <= distance_from_known:
            # Didn't find enough history to get a revid for the revno.
            earliest_revno = known_revno - len(partial_history) + 1
            return (False, (earliest_revno, partial_history[-1]))
        if len(partial_history) - 1 > distance_from_known:
            raise AssertionError('_iter_for_revno returned too much history')
        return (True, partial_history[-1])

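    # Hypothetical usage sketch (not part of the original source): resolve
    # revno 10 from a known later pair, e.g. the branch tip:
    #
    #   found, result = repo.get_rev_id_for_revno(10, (tip_revno, tip_revid))
    #   if found:
    #       rev_id = result                   # revision id for revno 10
    #   else:
    #       closest_revno, closest_revid = result   # history ran out early
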
    def iter_reverse_revision_history(self, revision_id):
        """Iterate backwards through revision ids in the lefthand history

        :param revision_id: The revision id to start with.  All its lefthand
            ancestors will be traversed.
        """
        graph = self.get_graph()
        next_id = revision_id
        while True:
            if next_id in (None, _mod_revision.NULL_REVISION):
                return
            try:
                parents = graph.get_parent_map([next_id])[next_id]
            except KeyError:
                raise errors.RevisionNotPresent(next_id, self)
            yield next_id
            if len(parents) == 0:
                return
            else:
                next_id = parents[0]

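    # Hypothetical usage sketch (not part of the original source): count the
    # lefthand (mainline) ancestors of a revision, starting at the revision
    # itself:
    #
    #   mainline_length = sum(
    #       1 for _ in repo.iter_reverse_revision_history(rev_id))
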
    @needs_read_lock
    def get_revision_inventory(self, revision_id):
        """Return inventory of a past revision."""
        # TODO: Unify this with get_inventory()
        # bzr 0.0.6 and later imposes the constraint that the inventory_id
        # must be the same as its revision, so this is trivial.
        if revision_id is None:
            # This does not make sense: if there is no revision,
            # then it is the current tree inventory surely ?!
            # and thus get_root_id() is something that looks at the last
            # commit on the branch, and the get_root_id is an inventory check.
            raise NotImplementedError
            # return Inventory(self.get_root_id())
        else:
            return self.get_inventory(revision_id)

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        raise NotImplementedError(self.is_shared)

    @needs_write_lock
    def reconcile(self, other=None, thorough=False):
        """Reconcile this repository."""
        from bzrlib.reconcile import RepoReconciler
        reconciler = RepoReconciler(self, thorough=thorough)
        reconciler.reconcile()
        return reconciler

    def _refresh_data(self):
        """Helper called from lock_* to ensure coherency with disk.

        The default implementation does nothing; it is however possible
        for repositories to maintain loaded indices across multiple locks
        by checking inside their implementation of this method to see
        whether their indices are still valid. This depends of course on
        the disk format being validatable in this manner. This method is
        also called by the refresh_data() public interface to cause a refresh
        to occur while in a write lock so that data inserted by a smart server
        push operation is visible on the client's instance of the physical
        repository.
        """

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return RevisionTree(self, Inventory(root_id=None),
                                _mod_revision.NULL_REVISION)
        else:
            inv = self.get_revision_inventory(revision_id)
            return RevisionTree(self, inv, revision_id)

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield RevisionTree(self, inv, inv.revision_id)

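    # Hypothetical usage sketch (not part of the original source): compare a
    # revision against another using the trees built above, assuming both
    # revisions exist in `repo` and the usual Tree.changes_from() API:
    #
    #   old_tree, new_tree = repo.revision_trees(['rev-1', 'rev-2'])
    #   delta = new_tree.changes_from(old_tree)
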
    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Tree for a revision on this branch with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield RevisionTree(self, filtered_inv, filtered_inv.revision_id)

    @needs_read_lock
    def get_ancestry(self, revision_id, topo_sorted=True):
        """Return a list of revision-ids integrated by a revision.

        The first element of the list is always None, indicating the origin
        revision.  This might change when we have history horizons, or
        perhaps we should have a new API.

        This is topologically sorted.
        """
        if _mod_revision.is_null(revision_id):
            return [None]
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        graph = self.get_graph()
        keys = set()
        search = graph._make_breadth_first_searcher([revision_id])
        while True:
            try:
                found, ghosts = search.next_with_ghosts()
            except StopIteration:
                break
            keys.update(found)
        if _mod_revision.NULL_REVISION in keys:
            keys.remove(_mod_revision.NULL_REVISION)
        if topo_sorted:
            parent_map = graph.get_parent_map(keys)
            keys = tsort.topo_sort(parent_map)
        return [None] + list(keys)

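    # Hypothetical usage sketch (not part of the original source): the leading
    # None marks the origin, so dropping it leaves the integrated revisions in
    # topological order:
    #
    #   ancestry = repo.get_ancestry(rev_id)[1:]
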
    def pack(self, hint=None):
        """Compress the data within the repository.

        This operation only makes sense for some repository types. For other
        types it should be a no-op that just returns.

        This stub method does not require a lock, but subclasses should use
        @needs_write_lock as this is a long running call and it's reasonable
        to implicitly lock for the user.

        :param hint: If not supplied, the whole repository is packed.
            If supplied, the repository may use the hint parameter as a
            hint for the parts of the repository to pack. A hint can be
            obtained from the result of commit_write_group(). Out of
            date hints are simply ignored, because concurrent operations
            can obsolete them rapidly.
        """

    def get_transaction(self):
        return self.control_files.get_transaction()

    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple(parent_revid
                    for (parent_revid,) in parent_keys)
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result

    def _make_parents_provider(self):
        return self

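    # Hypothetical usage sketch (not part of the original source): parents of
    # the null revision are reported as an empty tuple, and a revision missing
    # from the repository simply does not appear in the result:
    #
    #   parent_map = repo.get_parent_map([rev_id, _mod_revision.NULL_REVISION])
    #   parents_of_rev = parent_map.get(rev_id, ())
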
| 
2490.2.21
by Aaron Bentley
 Rename graph to deprecated_graph  | 
2468  | 
def get_graph(self, other_repository=None):  | 
| 
2490.2.13
by Aaron Bentley
 Update distinct -> lowest, refactor, add ParentsProvider concept  | 
2469  | 
"""Return the graph walker for this repository format"""  | 
2470  | 
parents_provider = self._make_parents_provider()  | 
|
| 
2490.2.14
by Aaron Bentley
 Avoid StackedParentsProvider when underlying repos match  | 
2471  | 
if (other_repository is not None and  | 
| 
3211.3.1
by Jelmer Vernooij
 Use convenience function to check whether two repository handles are referring to the same repository.  | 
2472  | 
not self.has_same_location(other_repository)):  | 
| 
4379.3.3
by Gary van der Merwe
 Rename and add doc string for StackedParentsProvider.  | 
2473  | 
parents_provider = graph.StackedParentsProvider(  | 
| 
2490.2.13
by Aaron Bentley
 Update distinct -> lowest, refactor, add ParentsProvider concept  | 
2474  | 
[parents_provider, other_repository._make_parents_provider()])  | 
| 
2490.2.22
by Aaron Bentley
 Rename GraphWalker -> Graph, _AncestryWalker -> _BreadthFirstSearcher  | 
2475  | 
return graph.Graph(parents_provider)  | 
| 
2490.2.13
by Aaron Bentley
 Update distinct -> lowest, refactor, add ParentsProvider concept  | 
2476  | 
|
| 
4145.2.1
by Ian Clatworthy
 faster check  | 
2477  | 
def _get_versioned_file_checker(self, text_key_references=None):  | 
2478  | 
"""Return an object suitable for checking versioned files.  | 
|
2479  | 
        
 | 
|
2480  | 
        :param text_key_references: if non-None, an already built
 | 
|
2481  | 
            dictionary mapping text keys ((fileid, revision_id) tuples)
 | 
|
2482  | 
            to whether they were referred to by the inventory of the
 | 
|
2483  | 
            revision_id that they contain. If None, this will be
 | 
|
2484  | 
            calculated.
 | 
|
2485  | 
        """
 | 
|
2486  | 
return _VersionedFileChecker(self,  | 
|
2487  | 
text_key_references=text_key_references)  | 
|
| 
2745.6.47
by Andrew Bennetts
 Move check_parents out of VersionedFile.  | 
2488  | 
|
| 
3184.1.9
by Robert Collins
 * ``Repository.get_data_stream`` is now deprecated in favour of  | 
2489  | 
def revision_ids_to_search_result(self, result_set):  | 
2490  | 
"""Convert a set of revision ids to a graph SearchResult."""  | 
|
2491  | 
result_parents = set()  | 
|
2492  | 
for parents in self.get_graph().get_parent_map(  | 
|
2493  | 
result_set).itervalues():  | 
|
2494  | 
result_parents.update(parents)  | 
|
2495  | 
included_keys = result_set.intersection(result_parents)  | 
|
2496  | 
start_keys = result_set.difference(included_keys)  | 
|
2497  | 
exclude_keys = result_parents.difference(result_set)  | 
|
2498  | 
result = graph.SearchResult(start_keys, exclude_keys,  | 
|
2499  | 
len(result_set), result_set)  | 
|
2500  | 
return result  | 
|
2501  | 
||

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        raise NotImplementedError(self.set_make_working_trees)

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        raise NotImplementedError(self.make_working_trees)

    @needs_write_lock
    def sign_revision(self, revision_id, gpg_strategy):
        plaintext = Testament.from_revision(self, revision_id).as_short_text()
        self.store_revision_signature(gpg_strategy, plaintext, revision_id)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')
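
A small sketch of checking for and retrieving a revision signature; the repository location and revision id are hypothetical placeholders.

from bzrlib import repository

repo = repository.Repository.open('/path/to/repo')  # hypothetical location
repo.lock_read()
try:
    rev_id = 'some-signed-revision-id'               # hypothetical revision id
    if repo.has_signature_for_revision_id(rev_id):
        print repo.get_signature_text(rev_id)
    else:
        print 'no signature recorded for %s' % rev_id
finally:
    repo.unlock()
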

    @needs_read_lock
    def check(self, revision_ids=None):
        """Check consistency of all history of given revision_ids.

        Different repository implementations should override _check().

        :param revision_ids: A non-empty list of revision_ids whose ancestry
             will be checked.  Typically the last revision_id of a branch.
        """
        return self._check(revision_ids)

    def _check(self, revision_ids):
        result = check.Check(self)
        result.check()
        return result

    def _warn_if_deprecated(self):
        global _deprecation_warning_done
        if _deprecation_warning_done:
            return
        _deprecation_warning_done = True
        warning("Format %s for %s is deprecated - please use 'bzr upgrade' to get better performance"
                % (self._format, self.bzrdir.transport.base))

    def supports_rich_root(self):
        return self._format.rich_root_data

    def _check_ascii_revisionid(self, revision_id, method):
        """Private helper for ascii-only repositories."""
        # weave repositories refuse to store revisionids that are non-ascii.
        if revision_id is not None:
            # weaves require ascii revision ids.
            if isinstance(revision_id, unicode):
                try:
                    revision_id.encode('ascii')
                except UnicodeEncodeError:
                    raise errors.NonAsciiRevisionId(method, self)
            else:
                try:
                    revision_id.decode('ascii')
                except UnicodeDecodeError:
                    raise errors.NonAsciiRevisionId(method, self)

    def revision_graph_can_have_wrong_parents(self):
        """Is it possible for this repository to have a revision graph with
        incorrect parents?

        If True, then this repository must also implement
        _find_inconsistent_revision_parents so that check and reconcile can
        check for inconsistencies before proceeding with other checks that may
        depend on the revision index being consistent.
        """
        raise NotImplementedError(self.revision_graph_can_have_wrong_parents)


# remove these delegates a while after bzr 0.15
def __make_delegated(name, from_module):
    def _deprecated_repository_forwarder():
        symbol_versioning.warn('%s moved to %s in bzr 0.15'
            % (name, from_module),
            DeprecationWarning,
            stacklevel=2)
        m = __import__(from_module, globals(), locals(), [name])
        try:
            return getattr(m, name)
        except AttributeError:
            raise AttributeError('module %s has no name %s'
                % (m, name))
    globals()[name] = _deprecated_repository_forwarder

for _name in [
        'AllInOneRepository',
        'WeaveMetaDirRepository',
        'PreSplitOutRepositoryFormat',
        'RepositoryFormat4',
        'RepositoryFormat5',
        'RepositoryFormat6',
        'RepositoryFormat7',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.weaverepo')

for _name in [
        'KnitRepository',
        'RepositoryFormatKnit',
        'RepositoryFormatKnit1',
        ]:
    __make_delegated(_name, 'bzrlib.repofmt.knitrepo')
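
As an illustrative aside on the delegation above: calling one of the forwarded names emits a DeprecationWarning and returns the class that now lives under bzrlib.repofmt. A minimal sketch, not part of bzrlib:

from bzrlib import repository

# The forwarder resolves lazily: calling it returns the real class object
# from bzrlib.repofmt.weaverepo (and warns about the move).
fmt_class = repository.RepositoryFormat7()
print fmt_class.__module__   # expected: 'bzrlib.repofmt.weaverepo'
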


def install_revision(repository, rev, revision_tree):
    """Install all revision data into a repository."""
    install_revisions(repository, [(rev, revision_tree, None)])


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update('Transferring revisions', n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
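
A usage sketch for install_revisions, copying one revision between two repositories; the locations and revision id are hypothetical, and the target must already be write-locked because install_revisions opens a write group.

from bzrlib import repository
from bzrlib.repository import install_revisions

source = repository.Repository.open('/path/to/source')  # hypothetical location
target = repository.Repository.open('/path/to/target')  # hypothetical location
source.lock_read()
target.lock_write()
try:
    rev_id = 'some-revision-id'                          # hypothetical revision id
    rev = source.get_revision(rev_id)
    tree = source.revision_tree(rev_id)
    # install_revisions manages its own write group around the copy.
    install_revisions(target, [(rev, tree, None)], num_revisions=1)
finally:
    target.unlock()
    source.unlock()
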


def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install all revision data into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    inv = revision_tree.inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if ie.file_id not in tree:
                continue
            parent_id = tree.inventory[ie.file_id].revision
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)


class MetaDirRepository(Repository):
    """Repositories in the new meta-dir layout.

    :ivar _transport: Transport for access to repository control files,
        typically pointing to .bzr/repository.
    """

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirRepository, self).__init__(_format, a_bzrdir, control_files)
        self._transport = control_files._transport

    def is_shared(self):
        """Return True if this repository is flagged as a shared repository."""
        return self._transport.has('shared-storage')

    @needs_write_lock
    def set_make_working_trees(self, new_value):
        """Set the policy flag for making working trees when creating branches.

        This only applies to branches that use this repository.

        The default is 'True'.
        :param new_value: True to restore the default, False to disable making
                          working trees.
        """
        if new_value:
            try:
                self._transport.delete('no-working-trees')
            except errors.NoSuchFile:
                pass
        else:
            self._transport.put_bytes('no-working-trees', '',
                mode=self.bzrdir._get_file_mode())

    def make_working_trees(self):
        """Returns the policy for making working trees on new branches."""
        return not self._transport.has('no-working-trees')
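
A short sketch of toggling the working-tree policy on a shared repository; the location is a hypothetical placeholder.

from bzrlib import repository

repo = repository.Repository.open('/path/to/shared-repo')  # hypothetical location
repo.lock_write()
try:
    # Writes the 'no-working-trees' flag file under .bzr/repository.
    repo.set_make_working_trees(False)
    print repo.make_working_trees()   # now False
finally:
    repo.unlock()
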


class MetaDirVersionedFileRepository(MetaDirRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


network_format_registry = registry.FormatRegistry()
"""Registry of formats indexed by their network name.

The network name for a repository format is an identifier that can be used when
referring to formats with smart server operations. See
RepositoryFormat.network_name() for more detail.
"""


format_registry = registry.FormatRegistry(network_format_registry)
"""Registry of formats, indexed by their BzrDirMetaFormat format string.

This can contain either format instances themselves, or classes/factories that
can be called to obtain one.
"""


#####################################################################
# Repository Formats


class RepositoryFormat(object):
    """A repository format.

    Formats provide four things:
     * An initialization routine to construct repository data on disk.
     * an optional format string which is used when the BzrDir supports
       versioned children.
     * an open routine which returns a Repository instance.
     * A network name for referring to the format in smart server RPC
       methods.

    There is one and only one Format subclass for each on-disk format. But
    there can be one Repository subclass that is used for several different
    formats. The _format attribute on a Repository instance can be used to
    determine the disk format.

    Formats are placed in a registry by their format string for reference
    during opening. These should be subclasses of RepositoryFormat for
    consistency.

    Once a format is deprecated, just deprecate the initialize and open
    methods on the format class. Do not deprecate the object, as the
    object may be created even when a repository instance hasn't been
    created.

    Common instance attributes:
    _matchingbzrdir - the bzrdir format that the repository format was
    originally written to work with. This can be used if manually
    constructing a bzrdir and repository, or more commonly for test suite
    parameterization.
    """

    # Set to True or False in derived classes. True indicates that the format
    # supports ghosts gracefully.
    supports_ghosts = None
    # Can this repository be given external locations to lookup additional
    # data. Set to True or False in derived classes.
    supports_external_lookups = None
    # Does this format support CHK bytestring lookups. Set to True or False in
    # derived classes.
    supports_chks = None
    # Should commit add an inventory, or an inventory delta to the repository.
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas?
    # (E.g. knits, where the knit deltas can be transplanted intact.)
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
    # Should fetch trigger a reconcile after the fetch? Only needed for
    # some repository formats that can suffer internal inconsistencies.
    _fetch_reconcile = False
    # Does this format have < O(tree_size) delta generation. Used to hint what
    # code path for commit, amongst other things.
    fast_deltas = None
    # Does doing a pack operation compress data? Useful for the pack UI command
    # (so if there is one pack, the operation can still proceed because it may
    # help), and for fetching when data won't have come from the same
    # compressor.
    pack_compresses = False

    def __str__(self):
        return "<%s>" % self.__class__.__name__

    def __eq__(self, other):
        # format objects are generally stateless
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self == other

    @classmethod
    def find_format(klass, a_bzrdir):
        """Return the format for the repository object in a_bzrdir.

        This is used by bzr native formats that have a "format" file in
        the repository.  Other methods may be used by different types of
        control directory.
        """
        try:
            transport = a_bzrdir.get_repository_transport(None)
            format_string = transport.get("format").read()
            return format_registry.get(format_string)
        except errors.NoSuchFile:
            raise errors.NoRepositoryPresent(a_bzrdir)
        except KeyError:
            raise errors.UnknownFormatError(format=format_string,
                                            kind='repository')

    @classmethod
    def register_format(klass, format):
        format_registry.register(format.get_format_string(), format)

    @classmethod
    def unregister_format(klass, format):
        format_registry.remove(format.get_format_string())
    @classmethod
    def get_default_format(klass):
        """Return the current default format."""
        from bzrlib import bzrdir
        return bzrdir.format_registry.make_bzrdir('default').repository_format

    def get_format_string(self):
        """Return the ASCII format string that identifies this format.

        Note that in pre format ?? repositories the format string is
        not permitted nor written to disk.
        """
        raise NotImplementedError(self.get_format_string)

    def get_format_description(self):
        """Return the short description for this format."""
        raise NotImplementedError(self.get_format_description)
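
A small sketch of format introspection using the classmethods above; the control-directory path is hypothetical and is assumed to contain a repository.

from bzrlib import bzrdir
from bzrlib.repository import RepositoryFormat

default_format = RepositoryFormat.get_default_format()
print default_format.get_format_string()
print default_format.get_format_description()

a_bzrdir = bzrdir.BzrDir.open('/path/to/branch')   # hypothetical location
print RepositoryFormat.find_format(a_bzrdir).get_format_description()
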

    # TODO: this shouldn't be in the base class, it's specific to things that
    # use weaves or knits -- mbp 20070207
    def _get_versioned_file_store(self,
                                  name,
                                  transport,
                                  control_files,
                                  prefixed=True,
                                  versionedfile_class=None,
                                  versionedfile_kwargs={},
                                  escaped=False):
        if versionedfile_class is None:
            versionedfile_class = self._versionedfile_class
        weave_transport = control_files._transport.clone(name)
        dir_mode = control_files._dir_mode
        file_mode = control_files._file_mode
        return VersionedFileStore(weave_transport, prefixed=prefixed,
                                  dir_mode=dir_mode,
                                  file_mode=file_mode,
                                  versionedfile_class=versionedfile_class,
                                  versionedfile_kwargs=versionedfile_kwargs,
                                  escaped=escaped)

    def initialize(self, a_bzrdir, shared=False):
        """Initialize a repository of this format in a_bzrdir.

        :param a_bzrdir: The bzrdir to put the new repository in.
        :param shared: The repository should be initialized as a sharable one.
        :returns: The new repository object.

        This may raise UninitializableFormat if shared repositories are not
        compatible with the a_bzrdir.
        """
        raise NotImplementedError(self.initialize)

    def is_supported(self):
        """Is this format supported?

        Supported formats must be initializable and openable.
        Unsupported formats may not support initialization or committing or
        some other features depending on the reason for not being supported.
        """
        return True

    def network_name(self):
        """A simple byte string uniquely identifying this format for RPC calls.

        MetaDir repository formats use their disk format string to identify the
        repository over the wire. All in one formats such as bzr < 0.8, and
        foreign formats like svn/git and hg should use some marker which is
        unique and immutable.
        """
        raise NotImplementedError(self.network_name)

    def check_conversion_target(self, target_format):
        raise NotImplementedError(self.check_conversion_target)

    def open(self, a_bzrdir, _found=False):
        """Return an instance of this format for the bzrdir a_bzrdir.

        _found is a private parameter, do not use it.
        """
        raise NotImplementedError(self.open)
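
To make the class attributes and abstract methods above concrete, here is a purely hypothetical subclass used only as an illustration; it is not part of bzrlib and is never registered or written to disk.

from bzrlib.repository import RepositoryFormat

class DemoRepositoryFormat(RepositoryFormat):
    """Hypothetical format illustrating the attributes documented above."""

    supports_ghosts = True
    supports_external_lookups = False
    supports_chks = False
    _fetch_order = 'topological'   # ask sources for topologically ordered streams
    _fetch_uses_deltas = False

    def get_format_string(self):
        return 'Demo repository format (illustration only)\n'

    def get_format_description(self):
        return 'Demo format, for illustration only'
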


class MetaDirRepositoryFormat(RepositoryFormat):
    """Common base class for the new repositories using the metadir layout."""

    rich_root_data = False
    supports_tree_reference = False
    supports_external_lookups = False

    @property
    def _matchingbzrdir(self):
        matching = bzrdir.BzrDirMetaFormat1()
        matching.repository_format = self
        return matching

    def __init__(self):
        super(MetaDirRepositoryFormat, self).__init__()

    def _create_control_files(self, a_bzrdir):
        """Create the required files and the initial control_files object."""
        # FIXME: RBC 20060125 don't peek under the covers
        # NB: no need to escape relative paths that are url safe.
        repository_transport = a_bzrdir.get_repository_transport(self)
        control_files = lockable_files.LockableFiles(repository_transport,
                                'lock', lockdir.LockDir)
        control_files.create_lock()
        return control_files

    def _upload_blank_content(self, a_bzrdir, dirs, files, utf8_files, shared):
        """Upload the initial blank content."""
        control_files = self._create_control_files(a_bzrdir)
        control_files.lock_write()
        transport = control_files._transport
        if shared == True:
            utf8_files += [('shared-storage', '')]
        try:
            transport.mkdir_multi(dirs, mode=a_bzrdir._get_dir_mode())
            for (filename, content_stream) in files:
                transport.put_file(filename, content_stream,
                    mode=a_bzrdir._get_file_mode())
            for (filename, content_bytes) in utf8_files:
                transport.put_bytes_non_atomic(filename, content_bytes,
                    mode=a_bzrdir._get_file_mode())
        finally:
            control_files.unlock()

    def network_name(self):
        """Metadir formats have matching disk and network format strings."""
        return self.get_format_string()


# Pre-0.8 formats that don't have a disk format string (because they are
# versioned by the matching control directory). We use the control directories
# disk format string as a key for the network_name because they meet the
# constraints (simple string, unique, immutable).
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 5\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat5',
)
network_format_registry.register_lazy(
    "Bazaar-NG branch, format 6\n",
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat6',
)

# formats which have no format string are not discoverable or independently
# creatable on disk, so are not registered in format_registry.  They're
# all in bzrlib.repofmt.weaverepo now.  When an instance of one of these is
# needed, it's constructed directly by the BzrDir.  Non-native formats where
# the repository is not separately opened are similar.

format_registry.register_lazy(
    'Bazaar-NG Repository format 7',
    'bzrlib.repofmt.weaverepo',
    'RepositoryFormat7'
    )

format_registry.register_lazy(
    'Bazaar-NG Knit Repository Format 1',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit1',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 3 (bzr 0.15)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit3',
    )

format_registry.register_lazy(
    'Bazaar Knit Repository Format 4 (bzr 1.0)\n',
    'bzrlib.repofmt.knitrepo',
    'RepositoryFormatKnit4',
    )

# Pack-based formats. There is one format for pre-subtrees, and one for
# post-subtrees to allow ease of testing.
# NOTE: These are experimental in 0.92. Stable in 1.0 and above
format_registry.register_lazy(
    'Bazaar pack repository format 1 (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack1',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with subtree support (needs bzr 0.92)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack3',
    )
format_registry.register_lazy(
    'Bazaar pack repository format 1 with rich root (needs bzr 1.0)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack4',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5 (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6.1)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRoot',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack5RichRoot (bzr 1.6)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack5RichRootBroken',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6 (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6',
    )
format_registry.register_lazy(
    'Bazaar RepositoryFormatKnitPack6RichRoot (bzr 1.9)\n',
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatKnitPack6RichRoot',
    )

# Development formats.
# Obsolete but kept pending a CHK based subtree format.
format_registry.register_lazy(
    ("Bazaar development format 2 with subtree support "
        "(needs bzr.dev from before 1.8)\n"),
    'bzrlib.repofmt.pack_repo',
    'RepositoryFormatPackDevelopment2Subtree',
    )

# 1.14->1.16 go below here
format_registry.register_lazy(
    'Bazaar development format - group compression and chk inventory'
        ' (needs bzr.dev from 1.14)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK1',
    )

format_registry.register_lazy(
    'Bazaar development format - chk repository with bencode revision '
        'serialization (needs bzr.dev from 1.16)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormatCHK2',
    )

format_registry.register_lazy(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n',
    'bzrlib.repofmt.groupcompress_repo',
    'RepositoryFormat2a',
    )
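
A small sketch of looking up one of the formats registered above by its disk format string; it assumes FormatRegistry.get returns a format instance for lazily registered classes, as the registry docstring earlier suggests.

from bzrlib.repository import format_registry

fmt = format_registry.get(
    'Bazaar repository format 2a (needs bzr 1.16 or later)\n')
print fmt.get_format_description()
# For metadir formats the network name is the disk format string.
print fmt.network_name() == fmt.get_format_string()
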
| 
1563.2.12
by Robert Collins
 Checkpointing: created InterObject to factor out common inter object worker code, added InterVersionedFile and tests to allow making join work between any versionedfile.  | 
3138  | 
class InterRepository(InterObject):  | 
| 
1534.1.27
by Robert Collins
 Start InterRepository with InterRepository.get.  | 
3139  | 
"""This class represents operations taking place between two repositories.  | 
3140  | 
||
| 
1534.1.33
by Robert Collins
 Move copy_content_into into InterRepository and InterWeaveRepo, and disable the default codepath test as we have optimised paths for all current combinations.  | 
3141  | 
    Its instances have methods like copy_content and fetch, and contain
 | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3142  | 
    references to the source and target repositories these operations can be
 | 
| 
1534.1.27
by Robert Collins
 Start InterRepository with InterRepository.get.  | 
3143  | 
    carried out on.
 | 
3144  | 
||
3145  | 
    Often we will provide convenience methods on 'repository' which carry out
 | 
|
3146  | 
    operations with another repository - they will always forward to
 | 
|
3147  | 
    InterRepository.get(other).method_name(parameters).
 | 
|
3148  | 
    """
 | 
|
3149  | 
||
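As a hedged illustration of the forwarding described in the docstring above, a convenience method on Repository typically reads roughly like this (argument handling trimmed; treat the exact signature as an assumption):

class _RepositorySketch(object):
    def fetch(self, source, revision_id=None, find_ghosts=False):
        # Forward to the best InterRepository optimiser for this pair.
        inter = InterRepository.get(source, self)
        return inter.fetch(revision_id=revision_id, find_ghosts=find_ghosts)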
| 
4144.2.1
by Andrew Bennetts
 Always batch revisions to ask of target when doing _walk_to_common_revisions, rather than special-casing in Inter*Remote*.  | 
3150  | 
_walk_to_common_revisions_batch_size = 50  | 
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3151  | 
_optimisers = []  | 
| 
1534.1.28
by Robert Collins
 Allow for optimised InterRepository selection.  | 
3152  | 
"""The available optimised InterRepository types."""  | 
3153  | 
||
| 
4060.1.3
by Robert Collins
 Implement the separate source component for fetch - repository.StreamSource.  | 
3154  | 
    @needs_write_lock
 | 
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
3155  | 
def copy_content(self, revision_id=None):  | 
| 
4060.1.3
by Robert Collins
 Implement the separate source component for fetch - repository.StreamSource.  | 
3156  | 
"""Make a complete copy of the content in self into destination.  | 
3157  | 
||
3158  | 
        This is a destructive operation! Do not use it on existing
 | 
|
3159  | 
        repositories.
 | 
|
3160  | 
||
3161  | 
        :param revision_id: Only copy the content needed to construct
 | 
|
3162  | 
                            revision_id and its parents.
 | 
|
3163  | 
        """
 | 
|
3164  | 
try:  | 
|
3165  | 
self.target.set_make_working_trees(self.source.make_working_trees())  | 
|
3166  | 
except NotImplementedError:  | 
|
3167  | 
            pass
 | 
|
3168  | 
self.target.fetch(self.source, revision_id=revision_id)  | 
|
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3169  | 
|
| 
4110.2.23
by Martin Pool
 blackbox hpss test should check repository was remotely locked  | 
3170  | 
    @needs_write_lock
 | 
| 
4070.9.2
by Andrew Bennetts
 Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations.  | 
3171  | 
def fetch(self, revision_id=None, pb=None, find_ghosts=False,  | 
3172  | 
fetch_spec=None):  | 
|
| 
1534.1.31
by Robert Collins
 Deprecated fetch.fetch and fetch.greedy_fetch for branch.fetch, and move the Repository.fetch internals to InterRepo and InterWeaveRepo.  | 
3173  | 
"""Fetch the content required to construct revision_id.  | 
3174  | 
||
| 
1910.7.17
by Andrew Bennetts
 Various cosmetic changes.  | 
3175  | 
        The content is copied from self.source to self.target.
 | 
| 
1534.1.31
by Robert Collins
 Deprecated fetch.fetch and fetch.greedy_fetch for branch.fetch, and move the Repository.fetch internals to InterRepo and InterWeaveRepo.  | 
3176  | 
|
3177  | 
        :param revision_id: if None all content is copied, if NULL_REVISION no
 | 
|
3178  | 
                            content is copied.
 | 
|
3179  | 
        :param pb: optional progress bar to use for progress reports. If not
 | 
|
3180  | 
                   provided a default one will be created.
 | 
|
| 
4065.1.1
by Robert Collins
 Change the return value of fetch() to None.  | 
3181  | 
        :return: None.
 | 
| 
1534.1.31
by Robert Collins
 Deprecated fetch.fetch and fetch.greedy_fetch for branch.fetch, and move the Repository.fetch internals to InterRepo and InterWeaveRepo.  | 
3182  | 
        """
 | 
| 
4060.1.3
by Robert Collins
 Implement the separate source component for fetch - repository.StreamSource.  | 
3183  | 
from bzrlib.fetch import RepoFetcher  | 
3184  | 
f = RepoFetcher(to_repository=self.target,  | 
|
3185  | 
from_repository=self.source,  | 
|
3186  | 
last_revision=revision_id,  | 
|
| 
4070.9.2
by Andrew Bennetts
 Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations.  | 
3187  | 
fetch_spec=fetch_spec,  | 
| 
4060.1.3
by Robert Collins
 Implement the separate source component for fetch - repository.StreamSource.  | 
3188  | 
pb=pb, find_ghosts=find_ghosts)  | 
| 
3172.4.4
by Robert Collins
 Review feedback.  | 
3189  | 
|
3190  | 
def _walk_to_common_revisions(self, revision_ids):  | 
|
3191  | 
"""Walk out from revision_ids in source to revisions target has.  | 
|
3192  | 
||
3193  | 
        :param revision_ids: The start point for the search.
 | 
|
3194  | 
        :return: A set of revision ids.
 | 
|
3195  | 
        """
 | 
|
| 
4144.3.12
by Andrew Bennetts
 Remove target_get_graph and target_get_parent_map attributes from InterRepository; nothing overrides them anymore.  | 
3196  | 
target_graph = self.target.get_graph()  | 
| 
1551.19.41
by Aaron Bentley
 Accelerate no-op pull  | 
3197  | 
revision_ids = frozenset(revision_ids)  | 
| 
3172.4.4
by Robert Collins
 Review feedback.  | 
3198  | 
missing_revs = set()  | 
| 
1551.19.41
by Aaron Bentley
 Accelerate no-op pull  | 
3199  | 
source_graph = self.source.get_graph()  | 
| 
3172.4.4
by Robert Collins
 Review feedback.  | 
3200  | 
        # ensure we don't pay silly lookup costs.
 | 
| 
1551.19.41
by Aaron Bentley
 Accelerate no-op pull  | 
3201  | 
searcher = source_graph._make_breadth_first_searcher(revision_ids)  | 
| 
3172.4.4
by Robert Collins
 Review feedback.  | 
3202  | 
null_set = frozenset([_mod_revision.NULL_REVISION])  | 
| 
3731.4.2
by Andrew Bennetts
 Move ghost check out of the inner loop.  | 
3203  | 
searcher_exhausted = False  | 
| 
3172.4.4
by Robert Collins
 Review feedback.  | 
3204  | 
while True:  | 
| 
3452.2.6
by Andrew Bennetts
 Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to  | 
3205  | 
next_revs = set()  | 
| 
3731.4.2
by Andrew Bennetts
 Move ghost check out of the inner loop.  | 
3206  | 
ghosts = set()  | 
3207  | 
            # Iterate the searcher until we have enough next_revs
 | 
|
| 
3452.2.6
by Andrew Bennetts
 Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to  | 
3208  | 
while len(next_revs) < self._walk_to_common_revisions_batch_size:  | 
3209  | 
try:  | 
|
| 
3731.4.2
by Andrew Bennetts
 Move ghost check out of the inner loop.  | 
3210  | 
next_revs_part, ghosts_part = searcher.next_with_ghosts()  | 
| 
3452.2.6
by Andrew Bennetts
 Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to  | 
3211  | 
next_revs.update(next_revs_part)  | 
| 
3731.4.2
by Andrew Bennetts
 Move ghost check out of the inner loop.  | 
3212  | 
ghosts.update(ghosts_part)  | 
| 
3452.2.6
by Andrew Bennetts
 Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to  | 
3213  | 
except StopIteration:  | 
| 
3731.4.2
by Andrew Bennetts
 Move ghost check out of the inner loop.  | 
3214  | 
searcher_exhausted = True  | 
| 
3452.2.6
by Andrew Bennetts
 Batch get_parent_map calls in InterPackToRemotePack._walk_to_common_revisions to  | 
3215  | 
                    break
 | 
| 
3731.4.3
by Andrew Bennetts
 Rework ghost checking in _walk_to_common_revisions.  | 
3216  | 
            # If there are ghosts in the source graph, and the caller asked for
 | 
3217  | 
            # them, make sure that they are present in the target.
 | 
|
| 
3731.4.5
by Andrew Bennetts
 Clarify the code slightly.  | 
3218  | 
            # We don't care about other ghosts as we can't fetch them and
 | 
3219  | 
            # haven't been asked to.
 | 
|
3220  | 
ghosts_to_check = set(revision_ids.intersection(ghosts))  | 
|
3221  | 
revs_to_get = set(next_revs).union(ghosts_to_check)  | 
|
3222  | 
if revs_to_get:  | 
|
3223  | 
have_revs = set(target_graph.get_parent_map(revs_to_get))  | 
|
| 
3731.4.2
by Andrew Bennetts
 Move ghost check out of the inner loop.  | 
3224  | 
                # we always have NULL_REVISION present.
 | 
| 
3731.4.5
by Andrew Bennetts
 Clarify the code slightly.  | 
3225  | 
have_revs = have_revs.union(null_set)  | 
3226  | 
                # Check if the target is missing any ghosts we need.
 | 
|
| 
3731.4.3
by Andrew Bennetts
 Rework ghost checking in _walk_to_common_revisions.  | 
3227  | 
ghosts_to_check.difference_update(have_revs)  | 
3228  | 
if ghosts_to_check:  | 
|
3229  | 
                    # One of the caller's revision_ids is a ghost in both the
 | 
|
3230  | 
                    # source and the target.
 | 
|
3231  | 
raise errors.NoSuchRevision(  | 
|
3232  | 
self.source, ghosts_to_check.pop())  | 
|
| 
3731.4.2
by Andrew Bennetts
 Move ghost check out of the inner loop.  | 
3233  | 
missing_revs.update(next_revs - have_revs)  | 
| 
3808.1.4
by John Arbash Meinel
 make _walk_to_common responsible for stopping ancestors  | 
3234  | 
                # Because we may have walked past the original stop point, make
 | 
3235  | 
                # sure everything is stopped
 | 
|
3236  | 
stop_revs = searcher.find_seen_ancestors(have_revs)  | 
|
3237  | 
searcher.stop_searching_any(stop_revs)  | 
|
| 
3731.4.2
by Andrew Bennetts
 Move ghost check out of the inner loop.  | 
3238  | 
if searcher_exhausted:  | 
| 
3172.4.4
by Robert Collins
 Review feedback.  | 
3239  | 
                break
 | 
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3240  | 
return searcher.get_result()  | 
| 
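A simplified sketch of the batching idea implemented above: drain the breadth-first searcher in chunks of batch_size so that the target's get_parent_map() is asked about many revisions per round trip (ghost handling and the NULL_REVISION special case are omitted here):

def _missing_from_target(searcher, target_graph, batch_size=50):
    missing = set()
    while True:
        batch = set()
        exhausted = False
        while len(batch) < batch_size:
            try:
                revs, _ghosts = searcher.next_with_ghosts()
                batch.update(revs)
            except StopIteration:
                exhausted = True
                break
        if batch:
            have = set(target_graph.get_parent_map(batch))
            missing.update(batch - have)
            # Do not walk below revisions the target already has.
            searcher.stop_searching_any(searcher.find_seen_ancestors(have))
        if exhausted:
            return missing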
3808.1.4
by John Arbash Meinel
 make _walk_to_common responsible for stopping ancestors  | 
3241  | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3242  | 
    @needs_read_lock
 | 
3243  | 
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):  | 
|
3244  | 
"""Return the revision ids that source has that target does not.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3245  | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3246  | 
        :param revision_id: only return revision ids included by this
 | 
3247  | 
                            revision_id.
 | 
|
3248  | 
        :param find_ghosts: If True find missing revisions in deep history
 | 
|
3249  | 
            rather than just finding the surface difference.
 | 
|
3250  | 
        :return: A bzrlib.graph.SearchResult.
 | 
|
3251  | 
        """
 | 
|
| 
3172.4.1
by Robert Collins
 * Fetching via bzr+ssh will no longer fill ghosts by default (this is  | 
3252  | 
        # stop searching at found target revisions.
 | 
3253  | 
if not find_ghosts and revision_id is not None:  | 
|
| 
3172.4.4
by Robert Collins
 Review feedback.  | 
3254  | 
return self._walk_to_common_revisions([revision_id])  | 
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3255  | 
        # generic, possibly worst case, slow code path.
 | 
3256  | 
target_ids = set(self.target.all_revision_ids())  | 
|
3257  | 
if revision_id is not None:  | 
|
3258  | 
source_ids = self.source.get_ancestry(revision_id)  | 
|
| 
3376.2.4
by Martin Pool
 Remove every assert statement from bzrlib!  | 
3259  | 
if source_ids[0] is not None:  | 
3260  | 
raise AssertionError()  | 
|
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3261  | 
source_ids.pop(0)  | 
3262  | 
else:  | 
|
3263  | 
source_ids = self.source.all_revision_ids()  | 
|
3264  | 
result_set = set(source_ids).difference(target_ids)  | 
|
| 
3184.1.9
by Robert Collins
 * ``Repository.get_data_stream`` is now deprecated in favour of  | 
3265  | 
return self.source.revision_ids_to_search_result(result_set)  | 
| 
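The generic path above is plain set arithmetic; a toy example with made-up revision ids:

source_ids = set(['rev-a', 'rev-b', 'rev-c'])
target_ids = set(['rev-a'])
result_set = source_ids.difference(target_ids)   # {'rev-b', 'rev-c'}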
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3266  | 
|
| 
2592.3.28
by Robert Collins
 Make InterKnitOptimiser be used between any same-model knit repository.  | 
3267  | 
    @staticmethod
 | 
3268  | 
def _same_model(source, target):  | 
|
| 
3582.1.2
by Martin Pool
 Default InterRepository.fetch raises IncompatibleRepositories  | 
3269  | 
"""True if source and target have the same data representation.  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3270  | 
|
| 
3582.1.2
by Martin Pool
 Default InterRepository.fetch raises IncompatibleRepositories  | 
3271  | 
        Note: this is always called on the base class; overriding it in a
 | 
3272  | 
        subclass will have no effect.
 | 
|
3273  | 
        """
 | 
|
3274  | 
try:  | 
|
3275  | 
InterRepository._assert_same_model(source, target)  | 
|
3276  | 
return True  | 
|
3277  | 
except errors.IncompatibleRepositories, e:  | 
|
3278  | 
return False  | 
|
3279  | 
||
3280  | 
    @staticmethod
 | 
|
3281  | 
def _assert_same_model(source, target):  | 
|
3282  | 
"""Raise an exception if two repositories do not use the same model.  | 
|
3283  | 
        """
 | 
|
| 
2592.3.28
by Robert Collins
 Make InterKnitOptimiser be used between any same-model knit repository.  | 
3284  | 
if source.supports_rich_root() != target.supports_rich_root():  | 
| 
3582.1.2
by Martin Pool
 Default InterRepository.fetch raises IncompatibleRepositories  | 
3285  | 
raise errors.IncompatibleRepositories(source, target,  | 
3286  | 
"different rich-root support")  | 
|
| 
2592.3.28
by Robert Collins
 Make InterKnitOptimiser be used between any same-model knit repository.  | 
3287  | 
if source._serializer != target._serializer:  | 
| 
3582.1.2
by Martin Pool
 Default InterRepository.fetch raises IncompatibleRepositories  | 
3288  | 
raise errors.IncompatibleRepositories(source, target,  | 
3289  | 
"different serializers")  | 
|
| 
2592.3.28
by Robert Collins
 Make InterKnitOptimiser be used between any same-model knit repository.  | 
3290  | 
|
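The concrete optimisers that follow combine a format check of their own with _same_model(); a minimal sketch of that pattern (SomeKnownFormat is a hypothetical stand-in, not a real bzrlib format class):

class SomeKnownFormat(object):
    """Hypothetical format class, for illustration only."""

def _is_compatible_sketch(source, target):
    try:
        right_formats = (isinstance(source._format, SomeKnownFormat) and
                         isinstance(target._format, SomeKnownFormat))
    except AttributeError:
        return False
    return right_formats and InterRepository._same_model(source, target)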
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3291  | 
|
3292  | 
class InterSameDataRepository(InterRepository):  | 
|
3293  | 
"""Code for converting between repositories that represent the same data.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3294  | 
|
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3295  | 
    Data format and model must match for this to work.
 | 
3296  | 
    """
 | 
|
3297  | 
||
| 
2241.1.6
by Martin Pool
 Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and  | 
3298  | 
    @classmethod
 | 
| 
2241.1.7
by Martin Pool
 rename method  | 
3299  | 
def _get_repo_format_to_test(self):  | 
| 
2814.1.1
by Robert Collins
 * Pushing, pulling and branching branches with subtree references was not  | 
3300  | 
"""Repository format for testing with.  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3301  | 
|
| 
2814.1.1
by Robert Collins
 * Pushing, pulling and branching branches with subtree references was not  | 
3302  | 
        InterSameData can pull from subtree to subtree and from non-subtree to
 | 
3303  | 
        non-subtree, so we test this with the richest repository format.
 | 
|
3304  | 
        """
 | 
|
3305  | 
from bzrlib.repofmt import knitrepo  | 
|
3306  | 
return knitrepo.RepositoryFormatKnit3()  | 
|
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3307  | 
|
| 
1910.2.14
by Aaron Bentley
 Fail when trying to use interrepository on Knit2 and Knit1  | 
3308  | 
    @staticmethod
 | 
3309  | 
def is_compatible(source, target):  | 
|
| 
2592.3.28
by Robert Collins
 Make InterKnitOptimiser be used between any same-model knit repository.  | 
3310  | 
return InterRepository._same_model(source, target)  | 
| 
1910.2.14
by Aaron Bentley
 Fail when trying to use interrepository on Knit2 and Knit1  | 
3311  | 
|
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3312  | 
|
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3313  | 
class InterWeaveRepo(InterSameDataRepository):  | 
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
3314  | 
"""Optimised code paths between Weave based repositories.  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3315  | 
|
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
3316  | 
    This should be in bzrlib/repofmt/weaverepo.py but we have not yet
 | 
3317  | 
    implemented lazy inter-object optimisation.
 | 
|
3318  | 
    """
 | 
|
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3319  | 
|
| 
2241.1.13
by Martin Pool
 Re-register InterWeaveRepo, fix test integration, add test for it  | 
3320  | 
    @classmethod
 | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3321  | 
def _get_repo_format_to_test(self):  | 
3322  | 
from bzrlib.repofmt import weaverepo  | 
|
3323  | 
return weaverepo.RepositoryFormat7()  | 
|
3324  | 
||
3325  | 
    @staticmethod
 | 
|
3326  | 
def is_compatible(source, target):  | 
|
3327  | 
"""Be compatible with known Weave formats.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3328  | 
|
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3329  | 
        We don't test for the stores being of specific types because that
 | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3330  | 
        could lead to confusing results, and there is no need to be
 | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3331  | 
        overly general.
 | 
3332  | 
        """
 | 
|
3333  | 
from bzrlib.repofmt.weaverepo import (  | 
|
3334  | 
RepositoryFormat5,  | 
|
3335  | 
RepositoryFormat6,  | 
|
3336  | 
RepositoryFormat7,  | 
|
3337  | 
                )
 | 
|
3338  | 
try:  | 
|
3339  | 
return (isinstance(source._format, (RepositoryFormat5,  | 
|
3340  | 
RepositoryFormat6,  | 
|
3341  | 
RepositoryFormat7)) and  | 
|
3342  | 
isinstance(target._format, (RepositoryFormat5,  | 
|
3343  | 
RepositoryFormat6,  | 
|
3344  | 
RepositoryFormat7)))  | 
|
3345  | 
except AttributeError:  | 
|
3346  | 
return False  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3347  | 
|
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3348  | 
    @needs_write_lock
 | 
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
3349  | 
def copy_content(self, revision_id=None):  | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3350  | 
"""See InterRepository.copy_content()."""  | 
3351  | 
        # weave specific optimised path:
 | 
|
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
3352  | 
try:  | 
3353  | 
self.target.set_make_working_trees(self.source.make_working_trees())  | 
|
| 
3349.1.2
by Aaron Bentley
 Change ValueError to RepositoryUpgradeRequired  | 
3354  | 
except (errors.RepositoryUpgradeRequired, NotImplementedError):  | 
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
3355  | 
            pass
 | 
3356  | 
        # FIXME do not peek!
 | 
|
| 
3407.2.14
by Martin Pool
 Remove more cases of getting transport via control_files  | 
3357  | 
if self.source._transport.listable():  | 
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
3358  | 
pb = ui.ui_factory.nested_progress_bar()  | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3359  | 
try:  | 
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
3360  | 
self.target.texts.insert_record_stream(  | 
3361  | 
self.source.texts.get_record_stream(  | 
|
3362  | 
self.source.texts.keys(), 'topological', False))  | 
|
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
3363  | 
pb.update('copying inventory', 0, 1)  | 
| 
3350.6.4
by Robert Collins
 First cut at pluralised VersionedFiles. Some rather massive API incompatabilities, primarily because of the difficulty of coherence among competing stores.  | 
3364  | 
self.target.inventories.insert_record_stream(  | 
3365  | 
self.source.inventories.get_record_stream(  | 
|
3366  | 
self.source.inventories.keys(), 'topological', False))  | 
|
3367  | 
self.target.signatures.insert_record_stream(  | 
|
3368  | 
self.source.signatures.get_record_stream(  | 
|
3369  | 
self.source.signatures.keys(),  | 
|
3370  | 
'unordered', True))  | 
|
3371  | 
self.target.revisions.insert_record_stream(  | 
|
3372  | 
self.source.revisions.get_record_stream(  | 
|
3373  | 
self.source.revisions.keys(),  | 
|
3374  | 
'topological', True))  | 
|
| 
2387.1.1
by Robert Collins
 Remove the --basis parameter to clone etc. (Robert Collins)  | 
3375  | 
finally:  | 
3376  | 
pb.finished()  | 
|
3377  | 
else:  | 
|
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3378  | 
self.target.fetch(self.source, revision_id=revision_id)  | 
3379  | 
||
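Each store is copied above with the same two-call pattern; a hedged sketch of that pattern on its own (vf_source and vf_target stand for any matching pair of stores such as texts, inventories, signatures or revisions):

def _copy_versioned_files(vf_source, vf_target, ordering='topological',
                          include_delta_closure=False):
    # Stream every key from the source store straight into the target store.
    vf_target.insert_record_stream(
        vf_source.get_record_stream(vf_source.keys(), ordering,
                                    include_delta_closure))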
3380  | 
    @needs_read_lock
 | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3381  | 
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):  | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3382  | 
"""See InterRepository.missing_revision_ids()."""  | 
3383  | 
        # we want all revisions to satisfy revision_id in source.
 | 
|
3384  | 
        # but we don't want to stat every file here and there.
 | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3385  | 
        # then we want all the revisions the other repository needs so that revision_id is
 | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3386  | 
        # checked, but not those that we have locally.
 | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3387  | 
        # so the first thing is to get a subset of the revisions to
 | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3388  | 
        # satisfy revision_id in source, and then eliminate those that
 | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3389  | 
        # we do already have.
 | 
| 
4031.3.1
by Frank Aspell
 Fixing various typos  | 
3390  | 
        # this is slow on a high-latency connection to self, but as this
 | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3391  | 
        # disk format scales terribly for push anyway due to rewriting
 | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3392  | 
        # inventory.weave, this is considered acceptable.
 | 
3393  | 
        # - RBC 20060209
 | 
|
3394  | 
if revision_id is not None:  | 
|
3395  | 
source_ids = self.source.get_ancestry(revision_id)  | 
|
| 
3376.2.4
by Martin Pool
 Remove every assert statement from bzrlib!  | 
3396  | 
if source_ids[0] is not None:  | 
3397  | 
raise AssertionError()  | 
|
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3398  | 
source_ids.pop(0)  | 
3399  | 
else:  | 
|
3400  | 
source_ids = self.source._all_possible_ids()  | 
|
3401  | 
source_ids_set = set(source_ids)  | 
|
3402  | 
        # source_ids is the worst possible case we may need to pull.
 | 
|
3403  | 
        # now we want to filter source_ids against what we actually
 | 
|
3404  | 
        # have in target, but don't try to check for existence where we know
 | 
|
3405  | 
        # we do not have a revision as that would be pointless.
 | 
|
3406  | 
target_ids = set(self.target._all_possible_ids())  | 
|
3407  | 
possibly_present_revisions = target_ids.intersection(source_ids_set)  | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3408  | 
actually_present_revisions = set(  | 
3409  | 
self.target._eliminate_revisions_not_present(possibly_present_revisions))  | 
|
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3410  | 
required_revisions = source_ids_set.difference(actually_present_revisions)  | 
3411  | 
if revision_id is not None:  | 
|
3412  | 
            # we used get_ancestry to determine source_ids, so we are assured all
 | 
|
3413  | 
            # revisions referenced are present as they are installed in topological order.
 | 
|
3414  | 
            # and the tip revision was validated by get_ancestry.
 | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3415  | 
result_set = required_revisions  | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3416  | 
else:  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3417  | 
            # if we just grabbed the possibly available ids, then
 | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3418  | 
            # we only have an estimate of what's available and need to validate
 | 
3419  | 
            # that against the revision records.
 | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3420  | 
result_set = set(  | 
3421  | 
self.source._eliminate_revisions_not_present(required_revisions))  | 
|
| 
3184.1.9
by Robert Collins
 * ``Repository.get_data_stream`` is now deprecated in favour of  | 
3422  | 
return self.source.revision_ids_to_search_result(result_set)  | 
| 
2241.1.12
by Martin Pool
 Restore InterWeaveRepo  | 
3423  | 
|
3424  | 
||
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3425  | 
class InterKnitRepo(InterSameDataRepository):  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3426  | 
"""Optimised code paths between Knit based repositories."""  | 
3427  | 
||
| 
2241.1.6
by Martin Pool
 Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and  | 
3428  | 
    @classmethod
 | 
| 
2241.1.7
by Martin Pool
 rename method  | 
3429  | 
def _get_repo_format_to_test(self):  | 
| 
2241.1.6
by Martin Pool
 Move Knit repositories into the submodule bzrlib.repofmt.knitrepo and  | 
3430  | 
from bzrlib.repofmt import knitrepo  | 
3431  | 
return knitrepo.RepositoryFormatKnit1()  | 
|
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3432  | 
|
3433  | 
    @staticmethod
 | 
|
3434  | 
def is_compatible(source, target):  | 
|
3435  | 
"""Be compatible with known Knit formats.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3436  | 
|
| 
1759.2.2
by Jelmer Vernooij
 Revert some of my spelling fixes and fix some typos after review by Aaron.  | 
3437  | 
        We don't test for the stores being of specific types because that
 | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3438  | 
        could lead to confusing results, and there is no need to be
 | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3439  | 
        overly general.
 | 
3440  | 
        """
 | 
|
| 
2592.3.28
by Robert Collins
 Make InterKnitOptimiser be used between any same-model knit repository.  | 
3441  | 
from bzrlib.repofmt.knitrepo import RepositoryFormatKnit  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3442  | 
try:  | 
| 
2592.3.28
by Robert Collins
 Make InterKnitOptimiser be used between any same-model knit repository.  | 
3443  | 
are_knits = (isinstance(source._format, RepositoryFormatKnit) and  | 
3444  | 
isinstance(target._format, RepositoryFormatKnit))  | 
|
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3445  | 
except AttributeError:  | 
3446  | 
return False  | 
|
| 
2592.3.28
by Robert Collins
 Make InterKnitOptimiser be used between any same-model knit repository.  | 
3447  | 
return are_knits and InterRepository._same_model(source, target)  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3448  | 
|
3449  | 
    @needs_read_lock
 | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3450  | 
def search_missing_revision_ids(self, revision_id=None, find_ghosts=True):  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3451  | 
"""See InterRepository.missing_revision_ids()."""  | 
3452  | 
if revision_id is not None:  | 
|
3453  | 
source_ids = self.source.get_ancestry(revision_id)  | 
|
| 
3376.2.4
by Martin Pool
 Remove every assert statement from bzrlib!  | 
3454  | 
if source_ids[0] is not None:  | 
3455  | 
raise AssertionError()  | 
|
| 
1668.1.14
by Martin Pool
 merge olaf - InvalidRevisionId fixes  | 
3456  | 
source_ids.pop(0)  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3457  | 
else:  | 
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
3458  | 
source_ids = self.source.all_revision_ids()  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3459  | 
source_ids_set = set(source_ids)  | 
3460  | 
        # source_ids is the worst possible case we may need to pull.
 | 
|
3461  | 
        # now we want to filter source_ids against what we actually
 | 
|
| 
1759.2.2
by Jelmer Vernooij
 Revert some of my spelling fixes and fix some typos after review by Aaron.  | 
3462  | 
        # have in target, but don't try to check for existence where we know
 | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3463  | 
        # we do not have a revision as that would be pointless.
 | 
| 
2850.3.1
by Robert Collins
 Move various weave specific code out of the base Repository class to weaverepo.py.  | 
3464  | 
target_ids = set(self.target.all_revision_ids())  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3465  | 
possibly_present_revisions = target_ids.intersection(source_ids_set)  | 
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3466  | 
actually_present_revisions = set(  | 
3467  | 
self.target._eliminate_revisions_not_present(possibly_present_revisions))  | 
|
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3468  | 
required_revisions = source_ids_set.difference(actually_present_revisions)  | 
3469  | 
if revision_id is not None:  | 
|
3470  | 
            # we used get_ancestry to determine source_ids, so we are assured all
 | 
|
3471  | 
            # revisions referenced are present as they are installed in topological order.
 | 
|
3472  | 
            # and the tip revision was validated by get_ancestry.
 | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3473  | 
result_set = required_revisions  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3474  | 
else:  | 
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3475  | 
            # if we just grabbed the possibly available ids, then
 | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3476  | 
            # we only have an estimate of what's available and need to validate
 | 
3477  | 
            # that against the revision records.
 | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3478  | 
result_set = set(  | 
3479  | 
self.source._eliminate_revisions_not_present(required_revisions))  | 
|
| 
3184.1.9
by Robert Collins
 * ``Repository.get_data_stream`` is now deprecated in favour of  | 
3480  | 
return self.source.revision_ids_to_search_result(result_set)  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3481  | 
|
| 
1910.2.17
by Aaron Bentley
 Get fetching from 1 to 2 under test  | 
3482  | 
|
| 
3735.2.177
by John Arbash Meinel
 InterDifferingSerializer inherits directly from InterRepository.  | 
3483  | 
class InterDifferingSerializer(InterRepository):  | 
| 
2996.2.1
by Aaron Bentley
 Add KnitRepositoryFormat4  | 
3484  | 
|
3485  | 
    @classmethod
 | 
|
3486  | 
def _get_repo_format_to_test(self):  | 
|
3487  | 
return None  | 
|
3488  | 
||
3489  | 
    @staticmethod
 | 
|
3490  | 
def is_compatible(source, target):  | 
|
3491  | 
"""Be compatible with Knit2 source and Knit3 target"""  | 
|
| 
3735.2.178
by John Arbash Meinel
 Fix another 2 failing tests.  | 
3492  | 
        # This is redundant with format.check_conversion_target(), however that
 | 
3493  | 
        # raises an exception, and we just want to say "False" as in we won't
 | 
|
3494  | 
        # support converting between these formats.
 | 
|
3495  | 
if source.supports_rich_root() and not target.supports_rich_root():  | 
|
3496  | 
return False  | 
|
3497  | 
if (source._format.supports_tree_reference  | 
|
| 
3735.2.179
by John Arbash Meinel
 Fix a trivial typo.  | 
3498  | 
and not target._format.supports_tree_reference):  | 
| 
3735.2.178
by John Arbash Meinel
 Fix another 2 failing tests.  | 
3499  | 
return False  | 
| 
2996.2.1
by Aaron Bentley
 Add KnitRepositoryFormat4  | 
3500  | 
return True  | 
3501  | 
||
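Put another way, the two checks above only refuse conversions that would lose data; a self-contained restatement (the boolean arguments are stand-ins for the format capabilities consulted above):

def _can_convert(source_rich_root, target_rich_root,
                 source_tree_refs, target_tree_refs):
    if source_rich_root and not target_rich_root:
        return False        # would drop root data
    if source_tree_refs and not target_tree_refs:
        return False        # would drop subtree references
    return True

# e.g. a plain-root source into a rich-root target is allowed; fetch()
# below synthesises the missing root texts in that case.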
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3502  | 
def _get_delta_for_revision(self, tree, parent_ids, basis_id, cache):  | 
3503  | 
"""Get the best delta and base for this revision.  | 
|
3504  | 
||
3505  | 
        :return: (basis_id, delta)
 | 
|
3506  | 
        """
 | 
|
3507  | 
possible_trees = [(parent_id, cache[parent_id])  | 
|
3508  | 
for parent_id in parent_ids  | 
|
3509  | 
if parent_id in cache]  | 
|
3510  | 
if len(possible_trees) == 0:  | 
|
3511  | 
            # There either aren't any parents, or the parents aren't in the
 | 
|
3512  | 
            # cache, so just use the last converted tree
 | 
|
3513  | 
possible_trees.append((basis_id, cache[basis_id]))  | 
|
3514  | 
deltas = []  | 
|
3515  | 
for basis_id, basis_tree in possible_trees:  | 
|
3516  | 
delta = tree.inventory._make_delta(basis_tree.inventory)  | 
|
3517  | 
deltas.append((len(delta), basis_id, delta))  | 
|
3518  | 
deltas.sort()  | 
|
3519  | 
return deltas[0][1:]  | 
|
3520  | 
||
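The selection above keeps whichever candidate basis yields the shortest inventory delta; a toy example with made-up ids and delta sizes:

deltas = [(12, 'parent-1', ['twelve changed entries...']),
          (3, 'parent-2', ['three changed entries...'])]
deltas.sort()                      # smallest delta first
basis_id, delta = deltas[0][1:]    # -> 'parent-2' and its delta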
| 
3735.2.181
by John Arbash Meinel
 With the 'rich-root-stream' code, we also need to handle ghost parents appropriately.  | 
3521  | 
def _get_parent_keys(self, root_key, parent_map):  | 
3522  | 
"""Get the parent keys for a given root id."""  | 
|
3523  | 
root_id, rev_id = root_key  | 
|
3524  | 
        # Include direct parents of the revision, but only if they used
 | 
|
| 
4324.3.1
by Robert Collins
 When adding rich root data follow the standard revision graph rules, so it does not create 'inconstent parents'.  | 
3525  | 
        # the same root_id and are heads.
 | 
| 
3735.2.181
by John Arbash Meinel
 With the 'rich-root-stream' code, we also need to handle ghost parents appropriately.  | 
3526  | 
parent_keys = []  | 
3527  | 
for parent_id in parent_map[rev_id]:  | 
|
3528  | 
if parent_id == _mod_revision.NULL_REVISION:  | 
|
3529  | 
                continue
 | 
|
3530  | 
if parent_id not in self._revision_id_to_root_id:  | 
|
3531  | 
                # We probably didn't read this revision, go spend the
 | 
|
3532  | 
                # extra effort to actually check
 | 
|
3533  | 
try:  | 
|
3534  | 
tree = self.source.revision_tree(parent_id)  | 
|
3535  | 
except errors.NoSuchRevision:  | 
|
3536  | 
                    # Ghost, fill out _revision_id_to_root_id in case we
 | 
|
3537  | 
                    # encounter this again.
 | 
|
3538  | 
                    # But set parent_root_id to None since we don't really know
 | 
|
3539  | 
parent_root_id = None  | 
|
3540  | 
else:  | 
|
3541  | 
parent_root_id = tree.get_root_id()  | 
|
3542  | 
self._revision_id_to_root_id[parent_id] = None  | 
|
3543  | 
else:  | 
|
3544  | 
parent_root_id = self._revision_id_to_root_id[parent_id]  | 
|
| 
4324.3.1
by Robert Collins
 When adding rich root data follow the standard revision graph rules, so it does not create 'inconstent parents'.  | 
3545  | 
if root_id == parent_root_id:  | 
3546  | 
                # With stacking we _might_ want to refer to a non-local
 | 
|
3547  | 
                # revision, but this code path only applies when we have the
 | 
|
3548  | 
                # full content available, so ghosts really are ghosts, not just
 | 
|
3549  | 
                # the edge of local data.
 | 
|
3550  | 
parent_keys.append((parent_id,))  | 
|
3551  | 
else:  | 
|
3552  | 
                # root_id may be in the parent anyway.
 | 
|
3553  | 
try:  | 
|
3554  | 
tree = self.source.revision_tree(parent_id)  | 
|
3555  | 
except errors.NoSuchRevision:  | 
|
3556  | 
                    # ghost, can't refer to it.
 | 
|
3557  | 
                    pass
 | 
|
3558  | 
else:  | 
|
3559  | 
try:  | 
|
3560  | 
parent_keys.append((tree.inventory[root_id].revision,))  | 
|
3561  | 
except errors.NoSuchId:  | 
|
3562  | 
                        # not in the tree
 | 
|
3563  | 
                        pass
 | 
|
3564  | 
g = graph.Graph(self.source.revisions)  | 
|
3565  | 
heads = g.heads(parent_keys)  | 
|
3566  | 
selected_keys = []  | 
|
3567  | 
for key in parent_keys:  | 
|
3568  | 
if key in heads and key not in selected_keys:  | 
|
3569  | 
selected_keys.append(key)  | 
|
3570  | 
return tuple([(root_id,)+ key for key in selected_keys])  | 
|
| 
3735.2.181
by John Arbash Meinel
 With the 'rich-root-stream' code, we also need to handle ghost parents appropriately.  | 
3571  | 
|
3572  | 
def _new_root_data_stream(self, root_keys_to_create, parent_map):  | 
|
3573  | 
for root_key in root_keys_to_create:  | 
|
3574  | 
parent_keys = self._get_parent_keys(root_key, parent_map)  | 
|
3575  | 
yield versionedfile.FulltextContentFactory(root_key,  | 
|
3576  | 
parent_keys, None, '')  | 
|
3577  | 
||
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3578  | 
def _fetch_batch(self, revision_ids, basis_id, cache):  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3579  | 
"""Fetch across a few revisions.  | 
3580  | 
||
3581  | 
        :param revision_ids: The revisions to copy
 | 
|
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3582  | 
        :param basis_id: The revision_id of a tree that must be in cache, used
 | 
3583  | 
            as a basis for delta when no other base is available
 | 
|
3584  | 
        :param cache: A cache of RevisionTrees that we can use.
 | 
|
3585  | 
        :return: The revision_id of the last converted tree. The RevisionTree
 | 
|
3586  | 
            for it will be in cache
 | 
|
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3587  | 
        """
 | 
3588  | 
        # Walk through all revisions; get inventory deltas, copy referenced
 | 
|
3589  | 
        # texts that delta references, insert the delta, revision and
 | 
|
3590  | 
        # signature.
 | 
|
| 
3735.31.8
by John Arbash Meinel
 Some work on rich-root support.  | 
3591  | 
root_keys_to_create = set()  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3592  | 
text_keys = set()  | 
3593  | 
pending_deltas = []  | 
|
3594  | 
pending_revisions = []  | 
|
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3595  | 
parent_map = self.source.get_parent_map(revision_ids)  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3596  | 
for tree in self.source.revision_trees(revision_ids):  | 
3597  | 
current_revision_id = tree.get_revision_id()  | 
|
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3598  | 
parent_ids = parent_map.get(current_revision_id, ())  | 
3599  | 
basis_id, delta = self._get_delta_for_revision(tree, parent_ids,  | 
|
3600  | 
basis_id, cache)  | 
|
| 
3735.31.8
by John Arbash Meinel
 Some work on rich-root support.  | 
3601  | 
if self._converting_to_rich_root:  | 
3602  | 
self._revision_id_to_root_id[current_revision_id] = \  | 
|
3603  | 
tree.get_root_id()  | 
|
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3604  | 
            # Find text entries that need to be copied
 | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3605  | 
for old_path, new_path, file_id, entry in delta:  | 
3606  | 
if new_path is not None:  | 
|
| 
3735.31.8
by John Arbash Meinel
 Some work on rich-root support.  | 
3607  | 
if not new_path:  | 
3608  | 
                        # This is the root
 | 
|
3609  | 
if not self.target.supports_rich_root():  | 
|
3610  | 
                            # The target doesn't support rich root, so we don't
 | 
|
3611  | 
                            # copy
 | 
|
3612  | 
                            continue
 | 
|
3613  | 
if self._converting_to_rich_root:  | 
|
| 
3735.31.13
by John Arbash Meinel
 A couple typo/etc fixes for the InterDifferingSerializer rich-root conversions.  | 
3614  | 
                            # This can't be copied normally; we have to insert
 | 
3615  | 
                            # it specially
 | 
|
| 
3735.31.8
by John Arbash Meinel
 Some work on rich-root support.  | 
3616  | 
root_keys_to_create.add((file_id, entry.revision))  | 
| 
3735.31.13
by John Arbash Meinel
 A couple typo/etc fixes for the InterDifferingSerializer rich-root conversions.  | 
3617  | 
                            continue
 | 
| 
3735.2.135
by Robert Collins
 Permit fetching bzr.dev [deal with inconsistent inventories.]  | 
3618  | 
text_keys.add((file_id, entry.revision))  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3619  | 
revision = self.source.get_revision(current_revision_id)  | 
3620  | 
pending_deltas.append((basis_id, delta,  | 
|
3621  | 
current_revision_id, revision.parent_ids))  | 
|
3622  | 
pending_revisions.append(revision)  | 
|
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3623  | 
cache[current_revision_id] = tree  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3624  | 
basis_id = current_revision_id  | 
3625  | 
        # Copy file texts
 | 
|
3626  | 
from_texts = self.source.texts  | 
|
3627  | 
to_texts = self.target.texts  | 
|
| 
3735.31.8
by John Arbash Meinel
 Some work on rich-root support.  | 
3628  | 
if root_keys_to_create:  | 
| 
3735.2.181
by John Arbash Meinel
 With the 'rich-root-stream' code, we also need to handle ghost parents appropriately.  | 
3629  | 
root_stream = self._new_root_data_stream(root_keys_to_create,  | 
3630  | 
parent_map)  | 
|
3631  | 
to_texts.insert_record_stream(root_stream)  | 
|
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3632  | 
to_texts.insert_record_stream(from_texts.get_record_stream(  | 
| 
4053.1.4
by Robert Collins
 Move the fetch control attributes from Repository to RepositoryFormat.  | 
3633  | 
text_keys, self.target._format._fetch_order,  | 
3634  | 
not self.target._format._fetch_uses_deltas))  | 
|
| 
4257.3.9
by Andrew Bennetts
 Add fix to InterDifferingSerializer too, although it's pretty ugly.  | 
3635  | 
        # insert inventory deltas
 | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3636  | 
for delta in pending_deltas:  | 
3637  | 
self.target.add_inventory_by_delta(*delta)  | 
|
| 
4257.3.9
by Andrew Bennetts
 Add fix to InterDifferingSerializer too, although it's pretty ugly.  | 
3638  | 
if self.target._fallback_repositories:  | 
| 
4257.3.10
by Andrew Bennetts
 Expand comment a little.  | 
3639  | 
            # Make sure this stacked repository has all the parent inventories
 | 
3640  | 
            # for the new revisions that we are about to insert.  We do this
 | 
|
3641  | 
            # before adding the revisions so that no revision is added until
 | 
|
3642  | 
            # all the inventories it may depend on are added.
 | 
|
| 
4257.3.9
by Andrew Bennetts
 Add fix to InterDifferingSerializer too, although it's pretty ugly.  | 
3643  | 
parent_ids = set()  | 
3644  | 
revision_ids = set()  | 
|
3645  | 
for revision in pending_revisions:  | 
|
3646  | 
revision_ids.add(revision.revision_id)  | 
|
3647  | 
parent_ids.update(revision.parent_ids)  | 
|
3648  | 
parent_ids.difference_update(revision_ids)  | 
|
3649  | 
parent_ids.discard(_mod_revision.NULL_REVISION)  | 
|
3650  | 
parent_map = self.source.get_parent_map(parent_ids)  | 
|
3651  | 
for parent_tree in self.source.revision_trees(parent_ids):  | 
|
3652  | 
basis_id, delta = self._get_delta_for_revision(tree, parent_ids, basis_id, cache)  | 
|
3653  | 
current_revision_id = parent_tree.get_revision_id()  | 
|
3654  | 
parents_parents = parent_map[current_revision_id]  | 
|
3655  | 
self.target.add_inventory_by_delta(  | 
|
3656  | 
basis_id, delta, current_revision_id, parents_parents)  | 
|
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3657  | 
        # insert signatures and revisions
 | 
3658  | 
for revision in pending_revisions:  | 
|
3659  | 
try:  | 
|
3660  | 
signature = self.source.get_signature_text(  | 
|
3661  | 
revision.revision_id)  | 
|
3662  | 
self.target.add_signature_text(revision.revision_id,  | 
|
3663  | 
signature)  | 
|
3664  | 
except errors.NoSuchRevision:  | 
|
3665  | 
                pass
 | 
|
3666  | 
self.target.add_revision(revision.revision_id, revision)  | 
|
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3667  | 
return basis_id  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3668  | 
|
3669  | 
def _fetch_all_revisions(self, revision_ids, pb):  | 
|
3670  | 
"""Fetch everything for the list of revisions.  | 
|
3671  | 
||
3672  | 
        :param revision_ids: The list of revisions to fetch. Must be in
 | 
|
3673  | 
            topological order.
 | 
|
3674  | 
        :param pb: A ProgressBar
 | 
|
3675  | 
        :return: None
 | 
|
3676  | 
        """
 | 
|
3677  | 
basis_id, basis_tree = self._get_basis(revision_ids[0])  | 
|
3678  | 
batch_size = 100  | 
|
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3679  | 
cache = lru_cache.LRUCache(100)  | 
3680  | 
cache[basis_id] = basis_tree  | 
|
3681  | 
del basis_tree # We don't want to hang on to it here  | 
|
| 
4431.3.7
by Jonathan Lange
 Cherrypick bzr.dev 4470, resolving conflicts.  | 
3682  | 
hints = []  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3683  | 
for offset in range(0, len(revision_ids), batch_size):  | 
3684  | 
self.target.start_write_group()  | 
|
3685  | 
try:  | 
|
3686  | 
pb.update('Transferring revisions', offset,  | 
|
| 
3879.2.13
by John Arbash Meinel
 There was a test that asserted we called pb.update() with the last revision.  | 
3687  | 
len(revision_ids))  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3688  | 
batch = revision_ids[offset:offset+batch_size]  | 
| 
4017.4.1
by John Arbash Meinel
 Change the generic fetch logic to improve delta selection.  | 
3689  | 
basis_id = self._fetch_batch(batch, basis_id, cache)  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3690  | 
except:  | 
3691  | 
self.target.abort_write_group()  | 
|
3692  | 
                raise
 | 
|
3693  | 
else:  | 
|
| 
4431.3.7
by Jonathan Lange
 Cherrypick bzr.dev 4470, resolving conflicts.  | 
3694  | 
hint = self.target.commit_write_group()  | 
3695  | 
if hint:  | 
|
3696  | 
hints.extend(hint)  | 
|
3697  | 
if hints and self.target._format.pack_compresses:  | 
|
3698  | 
self.target.pack(hint=hints)  | 
|
| 
3879.2.13
by John Arbash Meinel
 There was a test that asserted we called pb.update() with the last revision.  | 
3699  | 
pb.update('Transferring revisions', len(revision_ids),  | 
3700  | 
len(revision_ids))  | 
|
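The loop above follows the usual write-group discipline: abort on any error, otherwise commit and collect pack hints. A hedged sketch of that skeleton in isolation (repo stands for any repository object that supports write groups):

def _apply_in_batches(repo, items, batch_size, apply_batch):
    hints = []
    for offset in range(0, len(items), batch_size):
        repo.start_write_group()
        try:
            apply_batch(items[offset:offset + batch_size])
        except:
            repo.abort_write_group()
            raise
        else:
            hint = repo.commit_write_group()
            if hint:
                hints.extend(hint)
    return hints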
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3701  | 
|
| 
2996.2.1
by Aaron Bentley
 Add KnitRepositoryFormat4  | 
3702  | 
    @needs_write_lock
 | 
| 
4070.9.2
by Andrew Bennetts
 Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations.  | 
3703  | 
def fetch(self, revision_id=None, pb=None, find_ghosts=False,  | 
3704  | 
fetch_spec=None):  | 
|
| 
2996.2.1
by Aaron Bentley
 Add KnitRepositoryFormat4  | 
3705  | 
"""See InterRepository.fetch()."""  | 
| 
4070.9.2
by Andrew Bennetts
 Rough prototype of allowing a SearchResult to be passed to fetch, and using that to improve network conversations.  | 
3706  | 
if fetch_spec is not None:  | 
3707  | 
raise AssertionError("Not implemented yet...")  | 
|
| 
3735.31.8
by John Arbash Meinel
 Some work on rich-root support.  | 
3708  | 
if (not self.source.supports_rich_root()  | 
3709  | 
and self.target.supports_rich_root()):  | 
|
3710  | 
self._converting_to_rich_root = True  | 
|
3711  | 
self._revision_id_to_root_id = {}  | 
|
3712  | 
else:  | 
|
3713  | 
self._converting_to_rich_root = False  | 
|
| 
3184.1.9
by Robert Collins
 * ``Repository.get_data_stream`` is now deprecated in favour of  | 
3714  | 
revision_ids = self.target.search_missing_revision_ids(self.source,  | 
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3715  | 
revision_id, find_ghosts=find_ghosts).get_keys()  | 
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3716  | 
if not revision_ids:  | 
3717  | 
return 0, 0  | 
|
| 
3184.1.8
by Robert Collins
 * ``InterRepository.missing_revision_ids`` is now deprecated in favour of  | 
3718  | 
revision_ids = tsort.topo_sort(  | 
| 
3184.1.9
by Robert Collins
 * ``Repository.get_data_stream`` is now deprecated in favour of  | 
3719  | 
self.source.get_graph().get_parent_map(revision_ids))  | 
| 
3735.2.15
by Robert Collins
 More direct implementation of fetch between different serializers.  | 
3720  | 
if not revision_ids:  | 
3721  | 
return 0, 0  | 
|
3722  | 
        # Walk through all revisions; get inventory deltas, copy referenced
 | 
|
3723  | 
        # texts that delta references, insert the delta, revision and
 | 
|
3724  | 
        # signature.
 | 
|
3725  | 
first_rev = self.source.get_revision(revision_ids[0])  | 
|
| 
3146.6.1
by Aaron Bentley
 InterDifferingSerializer shows a progress bar  | 
3726  | 
if pb is None:  | 
3727  | 
my_pb = ui.ui_factory.nested_progress_bar()  | 
|
3728  | 
pb = my_pb  | 
|
3729  | 
else:  | 
|
| 
4110.2.5
by Martin Pool
 Deprecate passing pbs in to fetch()  | 
3730  | 
symbol_versioning.warn(  | 
3731  | 
symbol_versioning.deprecated_in((1, 14, 0))  | 
|
3732  | 
% "pb parameter to fetch()")  | 
|
| 
3146.6.1
by Aaron Bentley
 InterDifferingSerializer shows a progress bar  | 
3733  | 
my_pb = None  | 
3734  | 
try:  | 
|
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3735  | 
self._fetch_all_revisions(revision_ids, pb)  | 
| 
3146.6.1
by Aaron Bentley
 InterDifferingSerializer shows a progress bar  | 
3736  | 
finally:  | 
3737  | 
if my_pb is not None:  | 
|
3738  | 
my_pb.finished()  | 
|
| 
2996.2.1
by Aaron Bentley
 Add KnitRepositoryFormat4  | 
3739  | 
return len(revision_ids), 0  | 
3740  | 
||
| 
3879.2.8
by John Arbash Meinel
 Bring in the CHK inter-differing-serializer fetch code.  | 
3741  | 
def _get_basis(self, first_revision_id):  | 
3742  | 
"""Get a revision and tree which exists in the target.  | 
|
3743  | 
||
3744  | 
        This assumes that first_revision_id is selected for transmission
 | 
|
3745  | 
        because all other ancestors are already present. If we can't find an
 | 
|
3746  | 
        ancestor we fall back to NULL_REVISION since we know that is safe.
 | 
|
3747  | 
||
3748  | 
        :return: (basis_id, basis_tree)
 | 
|
3749  | 
        """
 | 
|
3750  | 
first_rev = self.source.get_revision(first_revision_id)  | 
|
3751  | 
try:  | 
|
3752  | 
basis_id = first_rev.parent_ids[0]  | 
|
3753  | 
            # only valid as a basis if the target has it
 | 
|
3754  | 
self.target.get_revision(basis_id)  | 
|
3755  | 
            # Try to get a basis tree - if it's a ghost it will hit the
 | 
|
3756  | 
            # NoSuchRevision case.
 | 
|
3757  | 
basis_tree = self.source.revision_tree(basis_id)  | 
|
3758  | 
except (IndexError, errors.NoSuchRevision):  | 
|
3759  | 
basis_id = _mod_revision.NULL_REVISION  | 
|
3760  | 
basis_tree = self.source.revision_tree(basis_id)  | 
|
3761  | 
return basis_id, basis_tree  | 
|
3762  | 
||
| 
2996.2.1
by Aaron Bentley
 Add KnitRepositoryFormat4  | 
3763  | 
|
3764  | 
InterRepository.register_optimiser(InterDifferingSerializer)  | 
|
| 
1910.2.15
by Aaron Bentley
 Back out inter.get changes, make optimizers an ordered list  | 
3765  | 
InterRepository.register_optimiser(InterSameDataRepository)  | 
| 
2241.1.13
by Martin Pool
 Re-register InterWeaveRepo, fix test integration, add test for it  | 
3766  | 
InterRepository.register_optimiser(InterWeaveRepo)  | 
| 
1563.2.31
by Robert Collins
 Convert Knit repositories to use knits.  | 
3767  | 
InterRepository.register_optimiser(InterKnitRepo)  | 
| 
1534.1.31
by Robert Collins
 Deprecated fetch.fetch and fetch.greedy_fetch for branch.fetch, and move the Repository.fetch internals to InterRepo and InterWeaveRepo.  | 
3768  | 
|
3769  | 
||
| 
1556.1.4
by Robert Collins
 Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same.  | 
3770  | 
class CopyConverter(object):  | 
3771  | 
"""A repository conversion tool which just performs a copy of the content.  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3772  | 
|
| 
1556.1.4
by Robert Collins
 Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same.  | 
3773  | 
    This is slow but quite reliable.
 | 
3774  | 
    """
 | 
|
3775  | 
||
3776  | 
def __init__(self, target_format):  | 
|
3777  | 
"""Create a CopyConverter.  | 
|
3778  | 
||
3779  | 
        :param target_format: The format the resulting repository should be.
 | 
|
3780  | 
        """
 | 
|
3781  | 
self.target_format = target_format  | 
|
| 
3943.8.1
by Marius Kruger
 remove all trailing whitespace from bzr source  | 
3782  | 
|
| 
1556.1.4
by Robert Collins
 Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same.  | 
3783  | 
def convert(self, repo, pb):  | 
3784  | 
"""Perform the conversion of to_convert, giving feedback via pb.  | 
|
3785  | 
||
3786  | 
        :param to_convert: The disk object to convert.
 | 
|
3787  | 
        :param pb: a progress bar to use for progress information.
 | 
|
3788  | 
        """
 | 
|
3789  | 
self.pb = pb  | 
|
3790  | 
self.count = 0  | 
|
| 
1596.2.22
by Robert Collins
 Fetch changes to use new pb.  | 
3791  | 
self.total = 4  | 
| 
1556.1.4
by Robert Collins
 Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same.  | 
3792  | 
        # this is only useful with metadir layouts - separated repo content.
 | 
3793  | 
        # trigger an assertion if it is not such a layout
 | 
|
3794  | 
repo._format.get_format_string()  | 
|
3795  | 
self.repo_dir = repo.bzrdir  | 
|
3796  | 
self.step('Moving repository to repository.backup')  | 
|
3797  | 
self.repo_dir.transport.move('repository', 'repository.backup')  | 
|
3798  | 
backup_transport = self.repo_dir.transport.clone('repository.backup')  | 
|
| 
1910.2.12
by Aaron Bentley
 Implement knit repo format 2  | 
3799  | 
repo._format.check_conversion_target(self.target_format)  | 
| 
1556.1.4
by Robert Collins
 Add a new format for what will become knit, and the surrounding logic to upgrade repositories within metadirs, and tests for the same.  | 
3800  | 
self.source_repo = repo._format.open(self.repo_dir,  | 
3801  | 
_found=True,  | 
|
3802  | 
_override_transport=backup_transport)  | 
|
3803  | 
self.step('Creating new repository')  | 
|
3804  | 
converted = self.target_format.initialize(self.repo_dir,  | 
|
3805  | 
self.source_repo.is_shared())  | 
|
3806  | 
converted.lock_write()  | 
|
3807  | 
try:  | 
|
3808  | 
self.step('Copying content into repository.')  | 
|
3809  | 
self.source_repo.copy_content_into(converted)  | 
|
3810  | 
finally:  | 
|
3811  | 
converted.unlock()  | 
|
3812  | 
self.step('Deleting old repository content.')  | 
|
3813  | 
self.repo_dir.transport.delete_tree('repository.backup')  | 
|
3814  | 
self.pb.note('repository converted')  | 
|
3815  | 
||
3816  | 
def step(self, message):  | 
|
3817  | 
"""Update the pb by a step."""  | 
|
3818  | 
self.count +=1  | 
|
3819  | 
self.pb.update(message, self.count, self.total)  | 
|
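
# Illustrative sketch (assumed usage, not from the original module): a
# CopyConverter is typically driven by the upgrade machinery roughly as
# follows, where `repo` is the repository being upgraded and `pb` is a
# progress bar supplied by the caller:
#
#   converter = CopyConverter(target_format)
#   converter.convert(repo, pb)
#
# The old content is kept in 'repository.backup' until the copy into the
# freshly initialized repository has completed, then deleted.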


_unescape_map = {
    'apos':"'",
    'quot':'"',
    'amp':'&',
    'lt':'<',
    'gt':'>'
}


def _unescaper(match, _map=_unescape_map):
    code = match.group(1)
    try:
        return _map[code]
    except KeyError:
        if not code.startswith('#'):
            raise
        return unichr(int(code[1:])).encode('utf8')


_unescape_re = None


def _unescape_xml(data):
    """Unescape predefined XML entities in a string of data."""
    global _unescape_re
    if _unescape_re is None:
        _unescape_re = re.compile('\&([^;]*);')
    return _unescape_re.sub(_unescaper, data)
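
# Illustrative example (not part of the original module): the five predefined
# XML entities and decimal character references are expanded in place, e.g.
#
#   _unescape_xml('&lt;b&gt;Tom &amp; Jerry&lt;/b&gt; &#169;')
#   # => '<b>Tom & Jerry</b> \xc2\xa9'  (UTF-8 encoded copyright sign)
#
# Unknown named entities raise KeyError, since only the predefined entities
# are mapped.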


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        wrong_parents = {}
        self.file_ids = set([file_id for file_id, _ in
            self.text_index.iterkeys()])
        # text keys is now grouped by file_id
        n_weaves = len(self.file_ids)
        files_in_revisions = {}
        revisions_of_files = {}
        n_versions = len(self.text_index)
        if progress_bar is not None:
            progress_bar.update('loading text store', 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            if progress_bar is not None:
                progress_bar.update('checking text graph', num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys
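
# Illustrative sketch (assumed usage, not from the original module): the
# checker is used by repository checking code along these lines, where
# `repo` is a locked repository and `pb` an optional progress bar:
#
#   checker = _VersionedFileChecker(repo)
#   wrong_parents, unused_keys = checker.check_file_version_parents(
#       repo.texts, progress_bar=pb)
#   for text_key, (stored, correct) in wrong_parents.iteritems():
#       print 'bad parents for %r: stored %r, expected %r' % (
#           text_key, stored, correct)
#
# The reporting loop is only for illustration.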


def _old_get_graph(repository, revision_id):
    """DO NOT USE. That is all. I'm serious."""
    graph = repository.get_graph()
    revision_graph = dict(((key, value) for key, value in
        graph.iter_ancestry([revision_id]) if value is not None))
    return _strip_NULL_ghosts(revision_graph)


def _strip_NULL_ghosts(revision_graph):
    """Also don't use this. more compatibility code for unmigrated clients."""
    # Filter ghosts, and null:
    if _mod_revision.NULL_REVISION in revision_graph:
        del revision_graph[_mod_revision.NULL_REVISION]
    for key, parents in revision_graph.items():
        revision_graph[key] = tuple(parent for parent in parents if parent
            in revision_graph)
    return revision_graph
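
# Illustrative example (toy data, not from the original module):
#
#   g = {'rev-2': ('rev-1', 'ghost-rev'), 'rev-1': ('null:',)}
#   _strip_NULL_ghosts(g)
#   # => {'rev-2': ('rev-1',), 'rev-1': ()}
#
# Parents that are not themselves keys in the mapping (ghosts) and the
# null: revision are dropped from every parents tuple.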


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo

    def insert_stream(self, stream, src_format, resume_tokens):
        """Insert a stream's content into the target repository.

        :param src_format: a bzr repository format.

        :return: a list of resume tokens and an iterable of keys for
            additional items required before the insertion can be completed.
        """
        self.target_repo.lock_write()
        try:
            if resume_tokens:
                self.target_repo.resume_write_group(resume_tokens)
                is_resume = True
            else:
                self.target_repo.start_write_group()
                is_resume = False
            try:
                # locked_insert_stream performs a commit|suspend.
                return self._locked_insert_stream(stream, src_format, is_resume)
            except:
                self.target_repo.abort_write_group(suppress_errors=True)
                raise
        finally:
            self.target_repo.unlock()

    def _locked_insert_stream(self, stream, src_format, is_resume):
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack.  This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport).  This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fall back to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                    ('texts', self.target_repo.texts),
                    ('inventories', self.target_repo.inventories),
                    ('revisions', self.target_repo.revisions),
                    ('signatures', self.target_repo.signatures),
                    ('chk_bytes', self.target_repo.chk_bytes),
                    ):
                if versioned_file is None:
                    continue
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        else:
            if missing_keys:
                # suspend the write group and tell the caller what we are
                # missing. We know we can suspend or else we would not have
                # entered this code path. (All repositories that can handle
                # missing keys can handle suspending a write group).
                write_group_tokens = self.target_repo.suspend_write_group()
                return write_group_tokens, missing_keys
        hint = self.target_repo.commit_write_group()
        if (to_serializer != src_serializer and
            self.target_repo._format.pack_compresses):
            self.target_repo.pack(hint=hint)
        return [], set()

    def _extract_and_insert_inventories(self, substream, serializer):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source, (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError('wtf: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()
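
# Illustrative sketch (assumed usage, not from the original module): a fetch
# drives the sink roughly as follows.  insert_stream() either commits the
# write group and returns ([], set()), or suspends it and returns resume
# tokens plus the keys that are still missing (e.g. basis inventories for a
# stacked repository):
#
#   sink = target_repo._get_sink()
#   tokens, missing = sink.insert_stream(stream, src_format, [])
#   if missing:
#       extra = source.get_stream_for_missing_keys(missing)
#       tokens, missing = sink.insert_stream(extra, src_format, tokens)
#   sink.finished()
#
# Repository._get_sink() and the two-pass loop are assumptions based on how
# the resume-token/missing-keys return value is meant to be consumed.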


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # bug #261339
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by __fetch between fetching weave texts and
        fetching the inventory weave.

        Subclasses should override this if they need to generate root texts
        after fetching weave texts.
        """
        if self._rich_root_upgrade():
            import bzrlib.fetch
            return bzrlib.fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = list(graph.iter_topo_order(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventories references
                # will be valid.
                for _ in self._generate_root_texts(revs):
                    yield _
                # NB: This currently reopens the inventory weave in source;
                # using a single stream interface instead would avoid this.
                from_weave = self.from_repository.inventories
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks
            and (from_format._serializer == self.to_format._serializer)):
            # Both sides support chks, and they use the same serializer, so it
            # is safe to transmit the chk pages and inventory pages across
            # as-is.
            return self._get_chk_inventory_stream(revision_ids)
        elif (not from_format.supports_chks):
            # Source repository doesn't support chks. So we can transmit the
            # inventories 'as-is' and either they are just accepted on the
            # target, or the Sink will properly convert it.
            return self._get_simple_inventory_stream(revision_ids)
        else:
            # XXX: Hack to make not-chk->chk fetch: copy the inventories as
            #      inventories. Note that this should probably be done somehow
            #      as part of bzrlib.repository.StreamSink. Except JAM couldn't
            #      figure out how a non-chk repository could possibly handle
            #      deserializing an inventory stream from a chk repo, as it
            #      doesn't have a way to understand individual pages.
            return self._get_convertable_inventory_stream(revision_ids)

    def _get_simple_inventory_stream(self, revision_ids):
        from_weave = self.from_repository.inventories
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(),
            not self.delta_on_metadata()))

    def _get_chk_inventory_stream(self, revision_ids):
        """Fetch the inventory texts, along with the associated chk maps."""
        # We want an inventory outside of the search set, so that we can filter
        # out uninteresting chk pages. For now we use
        # _find_revision_outside_set, but if we had a Search with cut_revs, we
        # could use that instead.
        start_rev_id = self.from_repository._find_revision_outside_set(
            revision_ids)
        start_rev_key = (start_rev_id,)
        inv_keys_to_fetch = [(rev_id,) for rev_id in revision_ids]
        if start_rev_id != _mod_revision.NULL_REVISION:
            inv_keys_to_fetch.append((start_rev_id,))
        # Any repo that supports chk_bytes must also support out-of-order
        # insertion. At least, that is how we expect it to work
        # We use get_record_stream instead of iter_inventories because we want
        # to be able to insert the stream as well. We could instead fetch
        # allowing deltas, and then iter_inventories, but we don't know whether
        # source or target is more 'local' anyway.
        inv_stream = self.from_repository.inventories.get_record_stream(
            inv_keys_to_fetch, 'unordered',
            True) # We need them as full-texts so we can find their references
        uninteresting_chk_roots = set()
        interesting_chk_roots = set()
        def filter_inv_stream(inv_stream):
            for idx, record in enumerate(inv_stream):
                ### child_pb.update('fetch inv', idx, len(inv_keys_to_fetch))
                bytes = record.get_bytes_as('fulltext')
                chk_inv = inventory.CHKInventory.deserialise(
                    self.from_repository.chk_bytes, bytes, record.key)
                if record.key == start_rev_key:
                    uninteresting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        uninteresting_chk_roots.add(p_id_map.key())
                else:
                    yield record
                    interesting_chk_roots.add(chk_inv.id_to_entry.key())
                    p_id_map = chk_inv.parent_id_basename_to_file_id
                    if p_id_map is not None:
                        interesting_chk_roots.add(p_id_map.key())
        ### pb.update('fetch inventory', 0, 2)
        yield ('inventories', filter_inv_stream(inv_stream))
        # Now that we have worked out all of the interesting root nodes, grab
        # all of the interesting pages and insert them
        ### pb.update('fetch inventory', 1, 2)
        interesting = chk_map.iter_interesting_nodes(
            self.from_repository.chk_bytes, interesting_chk_roots,
            uninteresting_chk_roots)
        def to_stream_adapter():
            """Adapt the iter_interesting_nodes result to a single stream.

            iter_interesting_nodes returns records as it processes them, along
            with keys. However, we only want to return the records themselves.
            """
            for record, items in interesting:
                if record is not None:
                    yield record
        # XXX: We could instead call get_record_stream(records.keys())
        #      ATM, this will always insert the records as fulltexts, and
        #      requires that you can hang on to records once you have gone
        #      on to the next one. Further, it causes the target to
        #      recompress the data. Testing shows it to be faster than
        #      requesting the records again, though.
        yield ('chk_bytes', to_stream_adapter())
        ### pb.update('fetch inventory', 2, 2)

    def _get_convertable_inventory_stream(self, revision_ids):
        # XXX: One of source or target is using chks, and they don't have
        #      compatible serializations. The StreamSink code expects to be
        #      able to convert on the target, so we need to put
        #      bytes-on-the-wire that can be converted
        yield ('inventories', self._stream_invs_as_fulltexts(revision_ids))

    def _stream_invs_as_fulltexts(self, revision_ids):
        from_repo = self.from_repository
        from_serializer = from_repo._format._serializer
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        for inv in self.from_repository.iter_inventories(revision_ids):
            # XXX: This is a bit hackish, but it works. Basically,
            #      CHKSerializer 'accidentally' supports
            #      read/write_inventory_to_string, even though that is never
            #      the format that is stored on disk. It *does* give us a
            #      single string representation for an inventory, so live with
            #      it for now.
            #      This would be far better if we had a 'serialized inventory
            #      delta' form. Then we could use 'inventory._make_delta', and
            #      transmit that. This would both be faster to generate, and
            #      result in fewer bytes-on-the-wire.
            as_bytes = from_serializer.write_inventory_to_string(inv)
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, as_bytes)
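
# Illustrative sketch (assumed usage, not from the original module): the
# source half of a fetch pairs with StreamSink above.  Given a search result
# describing the revisions to transfer:
#
#   source = StreamSource(from_repo, to_repo._format)
#   for substream_kind, substream in source.get_stream(search):
#       ...  # each (kind, record stream) pair is fed to the sink
#
# In the real code the pairing is arranged by the fetch machinery
# (Repository._get_source() / _get_sink()) rather than by hand.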


def _iter_for_revno(repo, partial_history_cache, stop_index=None,
                    stop_revision=None):
    """Extend the partial history to include a given index

    If a stop_index is supplied, stop when that index has been reached.
    If a stop_revision is supplied, stop when that revision is
    encountered.  Otherwise, stop when the beginning of history is
    reached.

    :param stop_index: The index which should be present.  When it is
        present, history extension will stop.
    :param stop_revision: The revision id which should be present.  When
        it is encountered, history extension will stop.
    """
    start_revision = partial_history_cache[-1]
    iterator = repo.iter_reverse_revision_history(start_revision)
    try:
        # skip the last revision in the list
        iterator.next()
        while True:
            if (stop_index is not None and
                len(partial_history_cache) > stop_index):
                break
            if partial_history_cache[-1] == stop_revision:
                break
            revision_id = iterator.next()
            partial_history_cache.append(revision_id)
    except StopIteration:
        # No more history
        return
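
# Illustrative sketch (assumed usage, not from the original module): callers
# seed the cache with the branch tip and extend it until the wanted index is
# present, so partial_history_cache[n] is the revision n steps back from the
# tip:
#
#   partial_history = [branch.last_revision()]
#   _iter_for_revno(branch.repository, partial_history, stop_index=10)
#   tenth_back = partial_history[10]
#
# branch here is a hypothetical bzrlib Branch object used for illustration.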