# Copyright (C) 2005-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Repository formats built around versioned files."""

from __future__ import absolute_import


from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import itertools

from bzrlib import (
    check,
    config as _mod_config,
    debug,
    fetch as _mod_fetch,
    fifo_cache,
    gpg,
    graph,
    inventory_delta,
    lru_cache,
    osutils,
    revision as _mod_revision,
    serializer as _mod_serializer,
    static_tuple,
    symbol_versioning,
    tsort,
    ui,
    versionedfile,
    vf_search,
    )

from bzrlib.recordcounter import RecordCounter
from bzrlib.revisiontree import InventoryRevisionTree
from bzrlib.testament import Testament
from bzrlib.i18n import gettext
""")

from bzrlib import (
    errors,
    )
from bzrlib.decorators import (
    needs_read_lock,
    needs_write_lock,
    only_raises,
    )
from bzrlib.inventory import (
    Inventory,
    InventoryDirectory,
    ROOT_ID,
    entry_factory,
    )

from bzrlib.repository import (
    CommitBuilder,
    InterRepository,
    MetaDirRepository,
    RepositoryFormatMetaDir,
    Repository,
    RepositoryFormat,
    )

from bzrlib.trace import (
    mutter,
    )

class VersionedFileRepositoryFormat(RepositoryFormat):
    """Base class for all repository formats that are VersionedFiles-based."""

    supports_full_versioned_files = True
    supports_versioned_directories = True
    supports_unreferenced_revisions = True

    # Should commit add an inventory, or an inventory delta, to the repository?
    _commit_inv_deltas = True
    # What order should fetch operations request streams in?
    # The default is unordered as that is the cheapest for an origin to
    # provide.
    _fetch_order = 'unordered'
    # Does this repository format use deltas that can be fetched as-deltas?
    # (E.g. knits, where the knit deltas can be transplanted intact.)
    # We default to False, which will ensure that enough data to get
    # a full text out of any fetch stream will be grabbed.
    _fetch_uses_deltas = False
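
# Illustrative sketch (not part of the bzrlib sources): a concrete format
# would subclass VersionedFileRepositoryFormat and tune the fetch
# attributes, roughly like this hypothetical example:
#
#   class HypotheticalKnitishFormat(VersionedFileRepositoryFormat):
#       # Deltas can be transplanted intact, so fetch them as-deltas.
#       _fetch_uses_deltas = True
#       # Request streams in topological order instead of the default
#       # 'unordered'.
#       _fetch_order = 'topological'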


class VersionedFileCommitBuilder(CommitBuilder):
    """Commit builder implementation for versioned-files-based repositories."""

    # this commit builder supports the record_entry_contents interface
    supports_record_entry_contents = True

    # the default CommitBuilder does not manage trees whose root is versioned.
    _versioned_root = False

    def __init__(self, repository, parents, config_stack, timestamp=None,
                 timezone=None, committer=None, revprops=None,
                 revision_id=None, lossy=False):
        super(VersionedFileCommitBuilder, self).__init__(repository,
            parents, config_stack, timestamp, timezone, committer, revprops,
            revision_id, lossy)
        try:
            basis_id = self.parents[0]
        except IndexError:
            basis_id = _mod_revision.NULL_REVISION
        self.basis_delta_revision = basis_id
        self.new_inventory = Inventory(None)
        self._basis_delta = []
        self.__heads = graph.HeadsCache(repository.get_graph()).heads
        # memo'd check for no-op commits.
        self._any_changes = False
        # API compatibility: older code that used CommitBuilder did not call
        # .record_delete(), which means the delta that is computed would not be
        # valid. Callers that will call record_delete() should call
        # .will_record_deletes() to indicate that.
        self._recording_deletes = False

    def will_record_deletes(self):
        """Tell the commit builder that deletes are being notified.

        This enables the accumulation of an inventory delta; for the resulting
        commit to be valid, deletes against the basis MUST be recorded via
        builder.record_delete().
        """
        self._recording_deletes = True

    def any_changes(self):
        """Return True if any entries were changed.

        This includes merge-only changes. It is the core for the --unchanged
        detection in commit.

        :return: True if any changes have occurred.
        """
        return self._any_changes

    def _ensure_fallback_inventories(self):
        """Ensure that appropriate inventories are available.

        This only applies to repositories that are stacked, and is about
        ensuring the stacking invariants. Namely, that for any revision that is
        present, we either have all of the file content, or we have the parent
        inventory and the delta file content.
        """
        if not self.repository._fallback_repositories:
            return
        if not self.repository._format.supports_chks:
            raise errors.BzrError("Cannot commit directly to a stacked branch"
                " in pre-2a formats. See "
                "https://bugs.launchpad.net/bzr/+bug/375013 for details.")
        # This is a stacked repo, we need to make sure we have the parent
        # inventories for the parents.
        parent_keys = [(p,) for p in self.parents]
        parent_map = self.repository.inventories._index.get_parent_map(parent_keys)
        missing_parent_keys = {pk for pk in parent_keys
                               if pk not in parent_map}
        fallback_repos = list(reversed(self.repository._fallback_repositories))
        missing_keys = [('inventories', pk[0])
                        for pk in missing_parent_keys]
        resume_tokens = []
        while missing_keys and fallback_repos:
            fallback_repo = fallback_repos.pop()
            source = fallback_repo._get_source(self.repository._format)
            sink = self.repository._get_sink()
            stream = source.get_stream_for_missing_keys(missing_keys)
            missing_keys = sink.insert_stream_without_locking(stream,
                self.repository._format)
        if missing_keys:
            raise errors.BzrError('Unable to fill in parent inventories for a'
                                  ' stacked branch')

    def commit(self, message):
        """Make the actual commit.

        :return: The revision id of the recorded revision.
        """
        self._validate_unicode_text(message, 'commit message')
        rev = _mod_revision.Revision(
                       timestamp=self._timestamp,
                       timezone=self._timezone,
                       committer=self._committer,
                       message=message,
                       inventory_sha1=self.inv_sha1,
                       revision_id=self._new_revision_id,
                       properties=self._revprops)
        rev.parent_ids = self.parents
        if self._config_stack.get('create_signatures') == _mod_config.SIGN_ALWAYS:
            testament = Testament(rev, self.revision_tree())
            plaintext = testament.as_short_text()
            self.repository.store_revision_signature(
                gpg.GPGStrategy(self._config_stack), plaintext,
                self._new_revision_id)
        self.repository._add_revision(rev)
        self._ensure_fallback_inventories()
        self.repository.commit_write_group()
        return self._new_revision_id
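
    # Illustrative sketch (not from the bzrlib sources): the expected
    # commit-builder lifecycle, assuming `repo` is a write-locked
    # VersionedFileRepository inside a write group and `tree` is the tree
    # being committed:
    #
    #   builder = repo.get_commit_builder(branch, parents, config_stack)
    #   list(builder.record_iter_changes(tree, basis_id, iter_changes))
    #   builder.finish_inventory()
    #   rev_id = builder.commit('commit message')
    #
    # commit() stores the revision, signs it when configured to, checks the
    # stacking invariants, and commits the write group.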

    def abort(self):
        """Abort the commit that is being built."""
        self.repository.abort_write_group()

    def revision_tree(self):
        """Return the tree that was just committed.

        After calling commit() this can be called to get a
        RevisionTree representing the newly committed tree. This is
        preferred to calling Repository.revision_tree() because that may
        require deserializing the inventory, while we already have a copy in
        memory.
        """
        if self.new_inventory is None:
            self.new_inventory = self.repository.get_inventory(
                self._new_revision_id)
        return InventoryRevisionTree(self.repository, self.new_inventory,
            self._new_revision_id)

    def finish_inventory(self):
        """Tell the builder that the inventory is finished.

        :return: The inventory id in the repository, which can be used with
            repository.get_inventory.
        """
        if self.new_inventory is None:
            # an inventory delta was accumulated without creating a new
            # inventory.
            basis_id = self.basis_delta_revision
            # We ignore the 'inventory' returned by add_inventory_by_delta
            # because self.new_inventory is used to hint to the rest of the
            # system what code path was taken
            self.inv_sha1, _ = self.repository.add_inventory_by_delta(
                basis_id, self._basis_delta, self._new_revision_id,
                self.parents)
        else:
            if self.new_inventory.root is None:
                raise AssertionError('Root entry should be supplied to'
                    ' record_entry_contents, as of bzr 0.10.')
                self.new_inventory.add(InventoryDirectory(ROOT_ID, '', None))
            self.new_inventory.revision_id = self._new_revision_id
            self.inv_sha1 = self.repository.add_inventory(
                self._new_revision_id,
                self.new_inventory,
                self.parents
                )
        return self._new_revision_id

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """
        # In this repository format, root entries have no knit or weave.
        # When serializing out to disk and back in, root.revision is always
        # _new_revision_id.
        ie.revision = self._new_revision_id

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        if len(self.parents) == 0:
            raise errors.RootMissing()
        entry = entry_factory['directory'](tree.path2id(''), '',
            None)
        entry.revision = self._new_revision_id
        self._basis_delta.append(('', '', entry.file_id, entry))

    def _get_delta(self, ie, basis_inv, path):
        """Get a delta against the basis inventory for ie."""
        if not basis_inv.has_id(ie.file_id):
            # add
            result = (None, path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        elif ie != basis_inv[ie.file_id]:
            # common but altered
            # TODO: avoid this id2path call.
            result = (basis_inv.id2path(ie.file_id), path, ie.file_id, ie)
            self._basis_delta.append(result)
            return result
        else:
            # common, unaltered
            return None
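
    # Illustrative note (not from the bzrlib sources): each inventory-delta
    # item is an (old_path, new_path, file_id, new_entry) 4-tuple. With
    # hypothetical paths and ids:
    #
    #   (None, 'doc/readme.txt', 'readme-id', <InventoryFile>)   # added
    #   ('a.txt', 'b.txt', 'a-id', <InventoryFile>)              # renamed
    #   ('gone.txt', None, 'gone-id', None)                      # deleted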

    def _heads(self, file_id, revision_ids):
        """Calculate the graph heads for revision_ids in the graph of file_id.

        This can use either a per-file graph or a global revision graph as we
        have an identity relationship between the two graphs.
        """
        return self.__heads(revision_ids)

    def get_basis_delta(self):
        """Return the complete inventory delta versus the basis inventory.

        This has been built up with the calls to record_delete and
        record_entry_contents. The client must have already called
        will_record_deletes() to indicate that they will be generating a
        complete delta.

        :return: An inventory delta, suitable for use with apply_delta, or
            Repository.add_inventory_by_delta, etc.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        return self._basis_delta

    def record_delete(self, path, file_id):
        """Record that a delete occurred against a basis tree.

        This is an optional API - when used it adds items to the basis_delta
        being accumulated by the commit builder. It cannot be called unless the
        method will_record_deletes() has been called to inform the builder that
        a delta is being supplied.

        :param path: The path of the thing deleted.
        :param file_id: The file id that was deleted.
        """
        if not self._recording_deletes:
            raise AssertionError("recording deletes not activated.")
        delta = (path, None, file_id, None)
        self._basis_delta.append(delta)
        self._any_changes = True
        return delta
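
    # Illustrative sketch (not from the bzrlib sources): recording deletes so
    # that get_basis_delta() returns a complete delta, with a hypothetical
    # `builder` and file id:
    #
    #   builder.will_record_deletes()
    #   builder.record_delete('obsolete.txt', 'obsolete-id')
    #   # ...record the other changes...
    #   delta = builder.get_basis_delta()
    #
    # Calling record_delete() or get_basis_delta() without the prior
    # will_record_deletes() call raises AssertionError.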

    def record_entry_contents(self, ie, parent_invs, path, tree,
        content_summary):
        """Record the content of ie from tree into the commit if needed.

        Side effect: sets ie.revision when unchanged

        :param ie: An inventory entry present in the commit.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param path: The path the entry is at in the tree.
        :param tree: The tree which contains this entry and should be used to
            obtain content.
        :param content_summary: Summary data from the tree about the paths
            content - stat, length, exec, sha/link target. This is only
            accessed when the entry has a revision of None - that is when it is
            a candidate to commit.
        :return: A tuple (change_delta, version_recorded, fs_hash).
            change_delta is an inventory_delta change for this entry against
            the basis tree of the commit, or None if no change occurred against
            the basis tree.
            version_recorded is True if a new version of the entry has been
            recorded. For instance, committing a merge where a file was only
            changed on the other side will return (delta, False).
            fs_hash is either None, or the hash details for the path (currently
            a tuple of the contents sha1 and the statvalue returned by
            tree.get_file_with_stat()).
        """
        if self.new_inventory.root is None:
            if ie.parent_id is not None:
                raise errors.RootMissing()
            self._check_root(ie, parent_invs, tree)
        if ie.revision is None:
            kind = content_summary[0]
        else:
            # ie is carried over from a prior commit
            kind = ie.kind
        # XXX: repository specific check for nested tree support goes here - if
        # the repo doesn't want nested trees we skip it ?
        if (kind == 'tree-reference' and
            not self.repository._format.supports_tree_reference):
            # mismatch between commit builder logic and repository:
            # this needs the entry creation pushed down into the builder.
            raise NotImplementedError('Missing repository subtree support.')
        self.new_inventory.add(ie)

        # TODO: slow, take it out of the inner loop.
        try:
            basis_inv = parent_invs[0]
        except IndexError:
            basis_inv = Inventory(root_id=None)

        # ie.revision is always None if the InventoryEntry is considered
        # for committing. We may record the previous parents revision if the
        # content is actually unchanged against a sole head.
        if ie.revision is not None:
            if not self._versioned_root and path == '':
                # repositories that do not version the root set the root's
                # revision to the new commit even when no change occurs (more
                # specifically, they do not record a revision on the root; and
                # the rev id is assigned to the root during deserialisation -
                # this masks when a change may have occurred against the basis.
                # To match this we always issue a delta, because the revision
                # of the root will always be changing.
                if basis_inv.has_id(ie.file_id):
                    delta = (basis_inv.id2path(ie.file_id), path,
                        ie.file_id, ie)
                else:
                    # add
                    delta = (None, path, ie.file_id, ie)
                self._basis_delta.append(delta)
                return delta, False, None
            else:
                # we don't need to commit this, because the caller already
                # determined that an existing revision of this file is
                # appropriate. If it's not being considered for committing then
                # it and all its parents to the root must be unaltered so
                # no-change against the basis.
                if ie.revision == self._new_revision_id:
                    raise AssertionError("Impossible situation, a skipped "
                        "inventory entry (%r) claims to be modified in this "
                        "commit (%r)." % (ie, self._new_revision_id))
                return None, False, None
        # XXX: Friction: parent_candidates should return a list not a dict
        #      so that we don't have to walk the inventories again.
        parent_candidate_entries = ie.parent_candidates(parent_invs)
        head_set = self._heads(ie.file_id, parent_candidate_entries.keys())
        heads = []
        for inv in parent_invs:
            if inv.has_id(ie.file_id):
                old_rev = inv[ie.file_id].revision
                if old_rev in head_set:
                    heads.append(inv[ie.file_id].revision)
                    head_set.remove(inv[ie.file_id].revision)

        store = False
        # now we check to see if we need to write a new record to the
        # file-graph.
        # We write a new entry unless there is one head to the ancestors, and
        # the kind-derived content is unchanged.

        # Cheapest check first: no ancestors, or more than one head in the
        # ancestors, we write a new node.
        if len(heads) != 1:
            store = True
        if not store:
            # There is a single head, look it up for comparison
            parent_entry = parent_candidate_entries[heads[0]]
            # if the non-content specific data has changed, we'll be writing a
            # node:
            if (parent_entry.parent_id != ie.parent_id or
                parent_entry.name != ie.name):
                store = True
        # now we need to do content specific checks:
        if not store:
            # if the kind changed the content obviously has
            if kind != parent_entry.kind:
                store = True
        # Stat cache fingerprint feedback for the caller - None as we usually
        # don't generate one.
        fingerprint = None
        if kind == 'file':
            if content_summary[2] is None:
                raise ValueError("Files must not have executable = None")
            if not store:
                # We can't trust a check of the file length because of content
                # filtering...
                # if the exec bit has changed we have to store:
                if parent_entry.executable != content_summary[2]:
                    store = True
                elif parent_entry.text_sha1 == content_summary[3]:
                    # all meta and content is unchanged (using a hash cache
                    # hit to check the sha)
                    ie.revision = parent_entry.revision
                    ie.text_size = parent_entry.text_size
                    ie.text_sha1 = parent_entry.text_sha1
                    ie.executable = parent_entry.executable
                    return self._get_delta(ie, basis_inv, path), False, None
                else:
                    # Either there is only a hash change (no hash cache entry,
                    # or same size content change), or there is no change on
                    # this file at all.
                    # Provide the parent's hash to the store layer, so that if
                    # the content is unchanged we will not store a new node.
                    nostore_sha = parent_entry.text_sha1
            if store:
                # We want to record a new node regardless of the presence or
                # absence of a content change in the file.
                nostore_sha = None
            ie.executable = content_summary[2]
            file_obj, stat_value = tree.get_file_with_stat(ie.file_id, path)
            try:
                text = file_obj.read()
            finally:
                file_obj.close()
            try:
                ie.text_sha1, ie.text_size = self._add_text_to_weave(
                    ie.file_id, text, heads, nostore_sha)
                # Let the caller know we generated a stat fingerprint.
                fingerprint = (ie.text_sha1, stat_value)
            except errors.ExistingContent:
                # Turns out that the file content was unchanged, and we were
                # only going to store a new node if it was changed. Carry over
                # the entry.
                ie.revision = parent_entry.revision
                ie.text_size = parent_entry.text_size
                ie.text_sha1 = parent_entry.text_sha1
                ie.executable = parent_entry.executable
                return self._get_delta(ie, basis_inv, path), False, None
        elif kind == 'directory':
            if not store:
                # all data is meta here, nothing specific to directory, so
                # carry over:
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'symlink':
            current_link_target = content_summary[3]
            if not store:
                # symlink target is not generic metadata, check if it has
                # changed.
                if current_link_target != parent_entry.symlink_target:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.revision = parent_entry.revision
                ie.symlink_target = parent_entry.symlink_target
                return self._get_delta(ie, basis_inv, path), False, None
            ie.symlink_target = current_link_target
            self._add_text_to_weave(ie.file_id, '', heads, None)
        elif kind == 'tree-reference':
            if not store:
                if content_summary[3] != parent_entry.reference_revision:
                    store = True
            if not store:
                # unchanged, carry over.
                ie.reference_revision = parent_entry.reference_revision
                ie.revision = parent_entry.revision
                return self._get_delta(ie, basis_inv, path), False, None
            ie.reference_revision = content_summary[3]
            if ie.reference_revision is None:
                raise AssertionError("invalid content_summary for nested tree: %r"
                    % (content_summary,))
            self._add_text_to_weave(ie.file_id, '', heads, None)
        else:
            raise NotImplementedError('unknown kind')
        ie.revision = self._new_revision_id
        # The initial commit adds a root directory, but this in itself is not
        # a worthwhile commit.
        if (self.basis_delta_revision != _mod_revision.NULL_REVISION or
            path != ""):
            self._any_changes = True
        return self._get_delta(ie, basis_inv, path), True, fingerprint

    def record_iter_changes(self, tree, basis_revision_id, iter_changes,
        _entry_factory=entry_factory):
        """Record a new tree via iter_changes.

        :param tree: The tree to obtain text contents from for changed objects.
        :param basis_revision_id: The revision id of the tree the iter_changes
            has been generated against. Currently assumed to be the same
            as self.parents[0] - if it is not, errors may occur.
        :param iter_changes: An iter_changes iterator with the changes to apply
            to basis_revision_id. The iterator must not include any items with
            a current kind of None - missing items must be either filtered out
            or errored-on before record_iter_changes sees the item.
        :param _entry_factory: Private method to bind entry_factory locally for
            performance.
        :return: A generator of (file_id, relpath, fs_hash) tuples for use with
            tree._observed_sha1.
        """
        # Create an inventory delta based on deltas between all the parents and
        # deltas between all the parent inventories. We use inventory deltas
        # between the inventory objects because iter_changes masks
        # last-changed-field only changes.
        # Working data:
        # file_id -> change map, change is fileid, paths, changed, versioneds,
        # parents, names, kinds, executables
        merged_ids = {}
        # {file_id -> {revision_id -> inventory entry}}, for entries in parent
        # trees that are not parents[0]
        parent_entries = {}
        ghost_basis = False
        try:
            revtrees = list(self.repository.revision_trees(self.parents))
        except errors.NoSuchRevision:
            # one or more ghosts, slow path.
            revtrees = []
            for revision_id in self.parents:
                try:
                    revtrees.append(self.repository.revision_tree(revision_id))
                except errors.NoSuchRevision:
                    if not revtrees:
                        basis_revision_id = _mod_revision.NULL_REVISION
                        ghost_basis = True
                    revtrees.append(self.repository.revision_tree(
                        _mod_revision.NULL_REVISION))
        # The basis inventory from a repository
        if revtrees:
            basis_tree = revtrees[0]
        else:
            basis_tree = self.repository.revision_tree(
                _mod_revision.NULL_REVISION)
        basis_inv = basis_tree.root_inventory
        if len(self.parents) > 0:
            if basis_revision_id != self.parents[0] and not ghost_basis:
                raise Exception(
                    "arbitrary basis parents not yet supported with merges")
            for revtree in revtrees[1:]:
                for change in revtree.root_inventory._make_delta(basis_inv):
                    if change[1] is None:
                        # Not present in this parent.
                        continue
                    if change[2] not in merged_ids:
                        if change[0] is not None:
                            basis_entry = basis_inv[change[2]]
                            merged_ids[change[2]] = [
                                # basis revid
                                basis_entry.revision,
                                # new tree revid
                                change[3].revision]
                            parent_entries[change[2]] = {
                                # basis parent
                                basis_entry.revision: basis_entry,
                                # this parent
                                change[3].revision: change[3],
                                }
                        else:
                            merged_ids[change[2]] = [change[3].revision]
                            parent_entries[change[2]] = {change[3].revision: change[3]}
                    else:
                        merged_ids[change[2]].append(change[3].revision)
                        parent_entries[change[2]][change[3].revision] = change[3]
        else:
            merged_ids = {}
        # Setup the changes from the tree:
        # changes maps file_id -> (change, [parent revision_ids])
        changes = {}
        for change in iter_changes:
            # This probably looks up in basis_inv way too much.
            if change[1][0] is not None:
                head_candidate = [basis_inv[change[0]].revision]
            else:
                head_candidate = []
            changes[change[0]] = change, merged_ids.get(change[0],
                head_candidate)
        unchanged_merged = set(merged_ids) - set(changes)
        # Extend the changes dict with synthetic changes to record merges of
        # texts.
        for file_id in unchanged_merged:
            # Record a merged version of these items that did not change vs the
            # basis. This can be either identical parallel changes, or a revert
            # of a specific file after a merge. The recorded content will be
            # that of the current tree (which is the same as the basis), but
            # the per-file graph will reflect a merge.
            # NB:XXX: We are reconstructing path information we had, this
            # should be preserved instead.
            # inv delta  change: (file_id, (path_in_source, path_in_target),
            #   changed_content, versioned, parent, name, kind,
            #   executable)
            try:
                basis_entry = basis_inv[file_id]
            except errors.NoSuchId:
                # a change from basis->some_parents but file_id isn't in basis
                # so was new in the merge, which means it must have changed
                # from basis -> current, and as it hasn't the add was reverted
                # by the user. So we discard this change.
                pass
            else:
                change = (file_id,
                    (basis_inv.id2path(file_id), tree.id2path(file_id)),
                    False, (True, True),
                    (basis_entry.parent_id, basis_entry.parent_id),
                    (basis_entry.name, basis_entry.name),
                    (basis_entry.kind, basis_entry.kind),
                    (basis_entry.executable, basis_entry.executable))
                changes[file_id] = (change, merged_ids[file_id])
        # changes contains tuples with the change and a set of inventory
        # candidates for the file.
        # inv delta is:
        # old_path, new_path, file_id, new_inventory_entry
        seen_root = False  # Is the root in the basis delta?
        inv_delta = self._basis_delta
        modified_rev = self._new_revision_id
        for change, head_candidates in changes.values():
            if change[3][1]:  # versioned in target.
                # Several things may be happening here:
                # We may have a fork in the per-file graph
                #  - record a change with the content from tree
                # We may have a change against fewer than all trees
                #  - carry over the tree that hasn't changed
                # We may have a change against all trees
                #  - record the change with the content from tree
                kind = change[6][1]
                file_id = change[0]
                entry = _entry_factory[kind](file_id, change[5][1],
                    change[4][1])
                head_set = self._heads(change[0], set(head_candidates))
                heads = []
                # Preserve ordering.
                for head_candidate in head_candidates:
                    if head_candidate in head_set:
                        heads.append(head_candidate)
                        head_set.remove(head_candidate)
                carried_over = False
                if len(heads) == 1:
                    # Could be a carry-over situation:
                    parent_entry_revs = parent_entries.get(file_id, None)
                    if parent_entry_revs:
                        parent_entry = parent_entry_revs.get(heads[0], None)
                    else:
                        parent_entry = None
                    if parent_entry is None:
                        # The parent iter_changes was called against is the one
                        # that is the per-file head, so any change is relevant;
                        # the iter_changes result is valid.
                        carry_over_possible = False
                    else:
                        # could be a carry over situation
                        # A change against the basis may just indicate a merge,
                        # we need to check the content against the source of the
                        # merge to determine if it was changed after the merge
                        # or carried over.
                        if (parent_entry.kind != entry.kind or
                            parent_entry.parent_id != entry.parent_id or
                            parent_entry.name != entry.name):
                            # Metadata common to all entries has changed
                            # against per-file parent
                            carry_over_possible = False
                        else:
                            carry_over_possible = True
                        # per-type checks for changes against the parent_entry
                        # are done below.
                else:
                    # Cannot be a carry-over situation
                    carry_over_possible = False
                # Populate the entry in the delta
                if kind == 'file':
                    # XXX: There is still a small race here: If someone reverts
                    # the content of a file after iter_changes examines and
                    # decides it has changed, we will unconditionally record a
                    # new version even if some other process reverts it while
                    # commit is running (with the revert happening after
                    # iter_changes did its examination).
                    if change[7][1]:
                        entry.executable = True
                    else:
                        entry.executable = False
                    if (carry_over_possible and
                        parent_entry.executable == entry.executable):
                        # Check the file length, content hash after reading
                        # the file.
                        nostore_sha = parent_entry.text_sha1
                    else:
                        nostore_sha = None
                    file_obj, stat_value = tree.get_file_with_stat(file_id, change[1][1])
                    try:
                        text = file_obj.read()
                    finally:
                        file_obj.close()
                    try:
                        entry.text_sha1, entry.text_size = self._add_text_to_weave(
                            file_id, text, heads, nostore_sha)
                        yield file_id, change[1][1], (entry.text_sha1, stat_value)
                    except errors.ExistingContent:
                        # No content change against a carry_over parent
                        # Perhaps this should also yield a fs hash update?
                        carried_over = True
                        entry.text_size = parent_entry.text_size
                        entry.text_sha1 = parent_entry.text_sha1
                elif kind == 'symlink':
                    # Wants a path hint?
                    entry.symlink_target = tree.get_symlink_target(file_id)
                    if (carry_over_possible and
                        parent_entry.symlink_target == entry.symlink_target):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'directory':
                    if carry_over_possible:
                        carried_over = True
                    else:
                        # Nothing to set on the entry.
                        # XXX: split into the Root and nonRoot versions.
                        if change[1][1] != '' or self.repository.supports_rich_root():
                            self._add_text_to_weave(change[0], '', heads, None)
                elif kind == 'tree-reference':
                    if not self.repository._format.supports_tree_reference:
                        # This isn't quite sane as an error, but we shouldn't
                        # ever see this code path in practice: trees don't
                        # permit references when the repo doesn't support tree
                        # references.
                        raise errors.UnsupportedOperation(tree.add_reference,
                            self.repository)
                    reference_revision = tree.get_reference_revision(change[0])
                    entry.reference_revision = reference_revision
                    if (carry_over_possible and
                        parent_entry.reference_revision == reference_revision):
                        carried_over = True
                    else:
                        self._add_text_to_weave(change[0], '', heads, None)
                else:
                    raise AssertionError('unknown kind %r' % kind)
                if not carried_over:
                    entry.revision = modified_rev
                else:
                    entry.revision = parent_entry.revision
            else:
                entry = None
            new_path = change[1][1]
            inv_delta.append((change[1][0], new_path, change[0], entry))
            if new_path == '':
                seen_root = True
        self.new_inventory = None
        # The initial commit adds a root directory, but this in itself is not
        # a worthwhile commit.
        if ((len(inv_delta) > 0 and basis_revision_id != _mod_revision.NULL_REVISION) or
            (len(inv_delta) > 1 and basis_revision_id == _mod_revision.NULL_REVISION)):
            # This should perhaps be guarded by a check that the basis we
            # commit against is the basis for the commit and if not do a delta
            # against the basis.
            self._any_changes = True
        if not seen_root:
            # housekeeping root entry changes do not affect no-change commits.
            self._require_root_change(tree)
        self.basis_delta_revision = basis_revision_id
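
    # Illustrative note (not from the bzrlib sources): each item yielded by
    # iter_changes is an 8-tuple
    #
    #   (file_id, (old_path, new_path), changed_content, versioned,
    #    parent, name, kind, executable)
    #
    # where the last five fields are (old, new) pairs. A rename of a
    # hypothetical 'a.txt' to 'b.txt' with edits would look like:
    #
    #   ('a-id', ('a.txt', 'b.txt'), True, (True, True),
    #    ('root-id', 'root-id'), ('a.txt', 'b.txt'), ('file', 'file'),
    #    (False, False))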

    def _add_text_to_weave(self, file_id, new_text, parents, nostore_sha):
        parent_keys = tuple([(file_id, parent) for parent in parents])
        return self.repository.texts._add_text(
            (file_id, self._new_revision_id), parent_keys, new_text,
            nostore_sha=nostore_sha, random_id=self.random_revid)[0:2]
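
    # Illustrative note (not from the bzrlib sources): text keys are
    # (file_id, revision_id) 2-tuples, so committing a hypothetical file
    # 'a-id' in revision 'rev-2' whose per-file parent is 'rev-1' stores
    # key ('a-id', 'rev-2') with parent_keys (('a-id', 'rev-1'),); the
    # [0:2] slice keeps the new text's (sha1, length).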


class VersionedFileRootCommitBuilder(VersionedFileCommitBuilder):
    """This commit builder actually records the root id."""

    # the root entry gets versioned properly by this builder.
    _versioned_root = True

    def _check_root(self, ie, parent_invs, tree):
        """Helper for record_entry_contents.

        :param ie: An entry being added.
        :param parent_invs: The inventories of the parent revisions of the
            commit.
        :param tree: The tree that is being committed.
        """

    def _require_root_change(self, tree):
        """Enforce an appropriate root object change.

        This is called once when record_iter_changes is called, if and only if
        the root was not in the delta calculated by record_iter_changes.

        :param tree: The tree which is being committed.
        """
        # versioned roots do not change unless the tree found a change.


class VersionedFileRepository(Repository):
    """Repository holding history for one or more branches.

    The repository holds and retrieves historical information including
    revisions and file history.  It's normally accessed only by the Branch,
    which views a particular line of development through that history.

    The Repository builds on top of some byte storage facilities (the
    revisions, signatures, inventories, texts and chk_bytes attributes) and a
    Transport, which respectively provide byte storage and a means to access
    the (possibly remote) disk.

    The byte storage facilities are addressed via tuples, which we refer to
    as 'keys' throughout the code base. Revision_keys, inventory_keys and
    signature_keys are all 1-tuples: (revision_id,). text_keys are two-tuples:
    (file_id, revision_id). chk_bytes uses CHK keys - a 1-tuple with a single
    byte string made up of a hash identifier and a hash value.
    We use this interface because it allows low friction with the underlying
    code that implements disk indices, network encoding and other parts of
    bzrlib.

    :ivar revisions: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised revisions for the repository. This can be used to obtain
        revision graph information or to access raw serialised revisions.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar signatures: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised signatures for the repository. This can be used to
        obtain access to raw serialised signatures.  The result of trying to
        insert data into the repository via this store is undefined: it should
        be considered read-only except for implementors of repositories.
    :ivar inventories: A bzrlib.versionedfile.VersionedFiles instance containing
        the serialised inventories for the repository. This can be used to
        obtain unserialised inventories.  The result of trying to insert data
        into the repository via this store is undefined: it should be
        considered read-only except for implementors of repositories.
    :ivar texts: A bzrlib.versionedfile.VersionedFiles instance containing the
        texts of files and directories for the repository. This can be used to
        obtain file texts or file graphs. Note that Repository.iter_file_bytes
        is usually a better interface for accessing file texts.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar chk_bytes: A bzrlib.versionedfile.VersionedFiles instance containing
        any data the repository chooses to store or have indexed by its hash.
        The result of trying to insert data into the repository via this store
        is undefined: it should be considered read-only except for implementors
        of repositories.
    :ivar _transport: Transport for file access to repository, typically
        pointing to .bzr/repository.
    """

    # What class to use for a CommitBuilder. Often it's simpler to change this
    # in a Repository class subclass rather than to override
    # get_commit_builder.
    _commit_builder_class = VersionedFileCommitBuilder

    def add_fallback_repository(self, repository):
        """Add a repository to use for looking up data not held locally.

        :param repository: A repository.
        """
        if not self._format.supports_external_lookups:
            raise errors.UnstackableRepositoryFormat(self._format, self.base)
        # This can raise an exception, so should be done before we lock the
        # fallback repository.
        self._check_fallback_repository(repository)
        if self.is_locked():
            # This repository will call fallback.unlock() when we transition to
            # the unlocked state, so we make sure to increment the lock count
            repository.lock_read()
        self._fallback_repositories.append(repository)
        self.texts.add_fallback_versioned_files(repository.texts)
        self.inventories.add_fallback_versioned_files(repository.inventories)
        self.revisions.add_fallback_versioned_files(repository.revisions)
        self.signatures.add_fallback_versioned_files(repository.signatures)
        if self.chk_bytes is not None:
            self.chk_bytes.add_fallback_versioned_files(repository.chk_bytes)

    @only_raises(errors.LockNotHeld, errors.LockBroken)
    def unlock(self):
        super(VersionedFileRepository, self).unlock()
        if self.control_files._lock_count == 0:
            self._inventory_entry_cache.clear()

    def add_inventory(self, revision_id, inv, parents):
        """Add the inventory inv to the repository as revision_id.

        :param parents: The revision ids of the parents that revision_id
                        is known to have and are in the repository already.

        :returns: The validator (which is a sha1 digest, though what is sha'd
            is repository format specific) of the serialized inventory.
        """
        if not self.is_in_write_group():
            raise AssertionError("%r not in write group" % (self,))
        _mod_revision.check_not_reserved_id(revision_id)
        if not (inv.revision_id is None or inv.revision_id == revision_id):
            raise AssertionError(
                "Mismatch between inventory revision"
                " id and insertion revid (%r, %r)"
                % (inv.revision_id, revision_id))
        if inv.root is None:
            raise errors.RootMissing()
        return self._add_inventory_checked(revision_id, inv, parents)

    def _add_inventory_checked(self, revision_id, inv, parents):
        """Add inv to the repository after checking the inputs.

        This function can be overridden to allow different inventory styles.

        :seealso: add_inventory, for the contract.
        """
        inv_lines = self._serializer.write_inventory_to_lines(inv)
        return self._inventory_add_lines(revision_id, parents,
            inv_lines, check_content=False)
985
986
    def add_inventory_by_delta(self, basis_revision_id, delta, new_revision_id,
987
                               parents, basis_inv=None, propagate_caches=False):
988
        """Add a new inventory expressed as a delta against another revision.
989
990
        See the inventory developers documentation for the theory behind
991
        inventory deltas.
992
993
        :param basis_revision_id: The inventory id the delta was created
994
            against. (This does not have to be a direct parent.)
995
        :param delta: The inventory delta (see Inventory.apply_delta for
996
            details).
997
        :param new_revision_id: The revision id that the inventory is being
998
            added for.
999
        :param parents: The revision ids of the parents that revision_id is
1000
            known to have and are in the repository already. These are supplied
1001
            for repositories that depend on the inventory graph for revision
1002
            graph access, as well as for those that pun ancestry with delta
1003
            compression.
1004
        :param basis_inv: The basis inventory if it is already known,
1005
            otherwise None.
1006
        :param propagate_caches: If True, the caches for this inventory are
1007
          copied to and updated for the result if possible.
1008
1009
        :returns: (validator, new_inv)
1010
            The validator(which is a sha1 digest, though what is sha'd is
1011
            repository format specific) of the serialized inventory, and the
1012
            resulting inventory.
1013
        """
1014
        if not self.is_in_write_group():
1015
            raise AssertionError("%r not in write group" % (self,))
1016
        _mod_revision.check_not_reserved_id(new_revision_id)
1017
        basis_tree = self.revision_tree(basis_revision_id)
1018
        basis_tree.lock_read()
1019
        try:
1020
            # Note that this mutates the inventory of basis_tree, which not all
1021
            # inventory implementations may support: A better idiom would be to
1022
            # return a new inventory, but as there is no revision tree cache in
1023
            # repository this is safe for now - RBC 20081013
1024
            if basis_inv is None:
6405.2.5 by Jelmer Vernooij
Add root_inventory.
1025
                basis_inv = basis_tree.root_inventory
5815.4.1 by Jelmer Vernooij
Split versionedfile-specific stuff out into VersionedFileRepository.
1026
            basis_inv.apply_delta(delta)
1027
            basis_inv.revision_id = new_revision_id
1028
            return (self.add_inventory(new_revision_id, basis_inv, parents),
1029
                    basis_inv)
1030
        finally:
1031
            basis_tree.unlock()
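
    # Example (hypothetical usage): the delta format is the one consumed by
    # Inventory.apply_delta, a list of (old_path, new_path, file_id,
    # new_entry) tuples; ``repo`` and the ids are assumed:
    #
    #   ie = InventoryDirectory('subdir-id', 'subdir', ROOT_ID)
    #   ie.revision = 'rev-2'
    #   delta = [(None, 'subdir', 'subdir-id', ie)]
    #   validator, new_inv = repo.add_inventory_by_delta(
    #       'rev-1', delta, 'rev-2', parents=['rev-1'])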

    def _inventory_add_lines(self, revision_id, parents, lines,
        check_content=True):
        """Store lines in inv_vf and return the sha1 of the inventory."""
        parents = [(parent,) for parent in parents]
        result = self.inventories.add_lines((revision_id,), parents, lines,
            check_content=check_content)[0]
        self.inventories._access.flush()
        return result

    def add_revision(self, revision_id, rev, inv=None):
        """Add rev to the revision store as revision_id.

        :param revision_id: the revision id to use.
        :param rev: The revision object.
        :param inv: The inventory for the revision. If None, it will be looked
                    up in the inventory store.
        """
        # TODO: jam 20070210 Shouldn't we check rev.revision_id and
        #       rev.parent_ids?
        _mod_revision.check_not_reserved_id(revision_id)
        # check inventory present
        if not self.inventories.get_parent_map([(revision_id,)]):
            if inv is None:
                raise errors.WeaveRevisionNotPresent(revision_id,
                                                     self.inventories)
            else:
                # yes, this is not suitable for adding with ghosts.
                rev.inventory_sha1 = self.add_inventory(revision_id, inv,
                                                        rev.parent_ids)
        else:
            key = (revision_id,)
            rev.inventory_sha1 = self.inventories.get_sha1s([key])[key]
        self._add_revision(rev)
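
    # Example (hypothetical usage; the field values are assumed).  The
    # matching inventory must already be present, or be passed in via ``inv``:
    #
    #   rev = _mod_revision.Revision('rev-1',
    #       committer='Jane <jane@example.com>', timestamp=1300000000.0,
    #       timezone=0, message='initial', parent_ids=[], properties={})
    #   repo.add_revision('rev-1', rev)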

    def _add_revision(self, revision):
        text = self._serializer.write_revision_to_string(revision)
        key = (revision.revision_id,)
        parents = tuple((parent,) for parent in revision.parent_ids)
        self.revisions.add_lines(key, parents, osutils.split_lines(text))

    def _check_inventories(self, checker):
        """Check the inventories found from the revision scan.

        This is responsible for verifying the sha1 of inventories and
        creating a pending_keys set that covers data referenced by inventories.
        """
        bar = ui.ui_factory.nested_progress_bar()
        try:
            self._do_check_inventories(checker, bar)
        finally:
            bar.finished()

    def _do_check_inventories(self, checker, bar):
        """Helper for _check_inventories."""
        revno = 0
        keys = {'chk_bytes': set(), 'inventories': set(), 'texts': set()}
        kinds = ['chk_bytes', 'texts']
        count = len(checker.pending_keys)
        bar.update(gettext("inventories"), 0, 2)
        current_keys = checker.pending_keys
        checker.pending_keys = {}
        # Accumulate current checks.
        for key in current_keys:
            if key[0] != 'inventories' and key[0] not in kinds:
                checker._report_items.append('unknown key type %r' % (key,))
            keys[key[0]].add(key[1:])
        if keys['inventories']:
            # NB: output order *should* be roughly sorted - topo or
            # inverse topo depending on repository - either way decent
            # to just delta against. However, pre-CHK formats didn't
            # try to optimise inventory layout on disk. As such the
            # pre-CHK code path does not use inventory deltas.
            last_object = None
            for record in self.inventories.check(keys=keys['inventories']):
                if record.storage_kind == 'absent':
                    checker._report_items.append(
                        'Missing inventory {%s}' % (record.key,))
                else:
                    last_object = self._check_record('inventories', record,
                        checker, last_object,
                        current_keys[('inventories',) + record.key])
            del keys['inventories']
        else:
            return
        bar.update(gettext("texts"), 1)
        while (checker.pending_keys or keys['chk_bytes']
            or keys['texts']):
            # Something to check.
            current_keys = checker.pending_keys
            checker.pending_keys = {}
            # Accumulate current checks.
            for key in current_keys:
                if key[0] not in kinds:
                    checker._report_items.append('unknown key type %r' % (key,))
                keys[key[0]].add(key[1:])
            # Check the outermost kind only - inventories || chk_bytes || texts
            for kind in kinds:
                if keys[kind]:
                    last_object = None
                    for record in getattr(self, kind).check(keys=keys[kind]):
                        if record.storage_kind == 'absent':
                            checker._report_items.append(
                                'Missing %s {%s}' % (kind, record.key,))
                        else:
                            last_object = self._check_record(kind, record,
                                checker, last_object, current_keys[(kind,) + record.key])
                    keys[kind] = set()
                    break

    def _check_record(self, kind, record, checker, last_object, item_data):
        """Check a single text from this repository."""
        if kind == 'inventories':
            rev_id = record.key[0]
            inv = self._deserialise_inventory(rev_id,
                record.get_bytes_as('fulltext'))
            if last_object is not None:
                delta = inv._make_delta(last_object)
                for old_path, path, file_id, ie in delta:
                    if ie is None:
                        continue
                    ie.check(checker, rev_id, inv)
            else:
                for path, ie in inv.iter_entries():
                    ie.check(checker, rev_id, inv)
            if self._format.fast_deltas:
                return inv
        elif kind == 'chk_bytes':
            # No code written to check chk_bytes for this repo format.
            checker._report_items.append(
                'unsupported key type chk_bytes for %s' % (record.key,))
        elif kind == 'texts':
            self._check_text(record, checker, item_data)
        else:
            checker._report_items.append(
                'unknown key type %s for %s' % (kind, record.key))

    def _check_text(self, record, checker, item_data):
        """Check a single text."""
        # Check it is extractable.
        # TODO: check length.
        if record.storage_kind == 'chunked':
            chunks = record.get_bytes_as(record.storage_kind)
            sha1 = osutils.sha_strings(chunks)
            length = sum(map(len, chunks))
        else:
            content = record.get_bytes_as('fulltext')
            sha1 = osutils.sha_string(content)
            length = len(content)
        if item_data and sha1 != item_data[1]:
            checker._report_items.append(
                'sha1 mismatch: %s has sha1 %s expected %s referenced by %s' %
                (record.key, sha1, item_data[1], item_data[2]))

    @needs_read_lock
    def _eliminate_revisions_not_present(self, revision_ids):
        """Check every revision id in revision_ids to see if we have it.

        Returns a set of the present revisions.
        """
        graph = self.get_graph()
        parent_map = graph.get_parent_map(revision_ids)
        # The old API returned a list, should this actually be a set?
        return parent_map.keys()

    def __init__(self, _format, a_bzrdir, control_files):
        """Instantiate a VersionedFileRepository.

        :param _format: The format of the repository on disk.
        :param a_bzrdir: The ControlDir of the repository.
        :param control_files: Control files to use for locking, etc.
        """
        # In the future we will have a single api for all stores for
        # getting file texts, inventories and revisions, then
        # this construct will accept instances of those things.
        super(VersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)
        self._transport = control_files._transport
        self.base = self._transport.base
        # for tests
        self._reconcile_does_inventory_gc = True
        self._reconcile_fixes_text_parents = False
        self._reconcile_backsup_inventory = True
        # An InventoryEntry cache, used during deserialization
        self._inventory_entry_cache = fifo_cache.FIFOCache(10*1024)
        # Is it safe to return inventory entries directly from the entry cache,
        # rather than copying them?
        self._safe_to_return_from_cache = False

    def fetch(self, source, revision_id=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id from source.

        If revision_id is None and fetch_spec is None, then all content is
        copied.

        fetch() may not be used when the repository is in a write group -
        either finish the current write group before using fetch, or use
        fetch before starting the write group.

        :param find_ghosts: Find and copy revisions in the source that are
            ghosts in the target (and not reachable directly by walking out to
            the first-present revision in target from revision_id).
        :param revision_id: If specified, all the content needed for this
            revision ID will be copied to the target.  Fetch will determine for
            itself which content needs to be copied.
        :param fetch_spec: If specified, a SearchResult or
            PendingAncestryResult that describes which revisions to copy.  This
            allows copying multiple heads at once.  Mutually exclusive with
            revision_id.
        """
        if fetch_spec is not None and revision_id is not None:
            raise AssertionError(
                "fetch_spec and revision_id are mutually exclusive.")
        if self.is_in_write_group():
            raise errors.InternalBzrError(
                "May not fetch while in a write group.")
        # fast path same-url fetch operations
        # TODO: lift out to somewhere common with RemoteRepository
        # <https://bugs.launchpad.net/bzr/+bug/401646>
        if (self.has_same_location(source)
            and fetch_spec is None
            and self._has_same_fallbacks(source)):
            # check that last_revision is in 'from' and then return a
            # no-operation.
            if (revision_id is not None and
                not _mod_revision.is_null(revision_id)):
                self.get_revision(revision_id)
            return 0, []
        inter = InterRepository.get(source, self)
        if (fetch_spec is not None and
            not getattr(inter, "supports_fetch_spec", False)):
            raise errors.UnsupportedOperation(
                "fetch_spec not supported for %r" % inter)
        return inter.fetch(revision_id=revision_id,
            find_ghosts=find_ghosts, fetch_spec=fetch_spec)
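
    # Example (hypothetical usage; ``other_repo`` and 'rev-42' are assumed):
    # copy everything needed to reconstruct one revision from another
    # repository.
    #
    #   repo.lock_write()
    #   try:
    #       repo.fetch(other_repo, revision_id='rev-42')
    #   finally:
    #       repo.unlock()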

    @needs_read_lock
    def gather_stats(self, revid=None, committers=None):
        """See Repository.gather_stats()."""
        result = super(VersionedFileRepository, self).gather_stats(revid, committers)
        # now gather global repository information
        # XXX: This is available for many repos regardless of listability.
        if self.user_transport.listable():
            # XXX: do we want to __define len__() ?
            # Maybe the versionedfiles object should provide a different
            # method to get the number of keys.
            result['revisions'] = len(self.revisions.keys())
            # result['size'] = t
        return result

    def get_commit_builder(self, branch, parents, config_stack, timestamp=None,
                           timezone=None, committer=None, revprops=None,
                           revision_id=None, lossy=False):
        """Obtain a CommitBuilder for this repository.

        :param branch: Branch to commit to.
        :param parents: Revision ids of the parents of the new revision.
        :param config_stack: Configuration stack to use.
        :param timestamp: Optional timestamp recorded for commit.
        :param timezone: Optional timezone for timestamp.
        :param committer: Optional committer to set for commit.
        :param revprops: Optional dictionary of revision properties.
        :param revision_id: Optional revision id.
        :param lossy: Whether to discard data that cannot be natively
            represented, when pushing to a foreign VCS.
        """
        if self._fallback_repositories and not self._format.supports_chks:
            raise errors.BzrError("Cannot commit directly to a stacked branch"
                " in pre-2a formats. See "
                "https://bugs.launchpad.net/bzr/+bug/375013 for details.")
        result = self._commit_builder_class(self, parents, config_stack,
            timestamp, timezone, committer, revprops, revision_id,
            lossy)
        self.start_write_group()
        return result
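
    # Example (hypothetical usage; ``branch`` is assumed to be a write-locked
    # Branch whose repository is this one).  Note that this opens a write
    # group, which the caller still has to commit (or abort via the builder):
    #
    #   builder = repo.get_commit_builder(branch,
    #       parents=[branch.last_revision()],
    #       config_stack=branch.get_config_stack())
    #   # ... record the tree contents against the builder ...
    #   new_rev_id = builder.commit('example message')
    #   repo.commit_write_group()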

    def get_missing_parent_inventories(self, check_for_missing_texts=True):
        """Return the keys of missing inventory parents for revisions added in
        this write group.

        A revision is not complete if the inventory delta for that revision
        cannot be calculated.  Therefore if the parent inventories of a
        revision are not present, the revision is incomplete, and e.g. cannot
        be streamed by a smart server.  This method finds missing inventory
        parents for revisions added in this write group.
        """
        if not self._format.supports_external_lookups:
            # This is only an issue for stacked repositories
            return set()
        if not self.is_in_write_group():
            raise AssertionError('not in a write group')

        # XXX: We assume that every added revision already has its
        # corresponding inventory, so we only check for parent inventories that
        # might be missing, rather than all inventories.
        parents = set(self.revisions._index.get_missing_parents())
        parents.discard(_mod_revision.NULL_REVISION)
        unstacked_inventories = self.inventories._index
        present_inventories = unstacked_inventories.get_parent_map(
            key[-1:] for key in parents)
        parents.difference_update(present_inventories)
        if len(parents) == 0:
            # No missing parent inventories.
            return set()
        if not check_for_missing_texts:
            return set(('inventories', rev_id) for (rev_id,) in parents)
        # Ok, now we have a list of missing inventories.  But these only matter
        # if the inventories that reference them are missing some texts they
        # appear to introduce.
        # XXX: Texts referenced by all added inventories need to be present,
        # but at the moment we're only checking for texts referenced by
        # inventories at the graph's edge.
        key_deps = self.revisions._index._key_dependencies
        key_deps.satisfy_refs_for_keys(present_inventories)
        referrers = frozenset(r[0] for r in key_deps.get_referrers())
        file_ids = self.fileids_altered_by_revision_ids(referrers)
        missing_texts = set()
        for file_id, version_ids in file_ids.iteritems():
            missing_texts.update(
                (file_id, version_id) for version_id in version_ids)
        present_texts = self.texts.get_parent_map(missing_texts)
        missing_texts.difference_update(present_texts)
        if not missing_texts:
            # No texts are missing, so all revisions and their deltas are
            # reconstructable.
            return set()
        # Alternatively the text versions could be returned as the missing
        # keys, but this is likely to be less data.
        missing_keys = set(('inventories', rev_id) for (rev_id,) in parents)
        return missing_keys

    @needs_read_lock
    def has_revisions(self, revision_ids):
        """Probe to find out the presence of multiple revisions.

        :param revision_ids: An iterable of revision_ids.
        :return: A set of the revision_ids that were present.
        """
        parent_map = self.revisions.get_parent_map(
            [(rev_id,) for rev_id in revision_ids])
        result = set()
        if _mod_revision.NULL_REVISION in revision_ids:
            result.add(_mod_revision.NULL_REVISION)
        result.update([key[0] for key in parent_map])
        return result
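
    # Example (hypothetical ids): only the revisions actually present come
    # back, so ghosts can be filtered out in one round trip.
    #
    #   present = repo.has_revisions(['rev-1', 'rev-2', 'ghost-rev'])
    #   # e.g. -> set(['rev-1', 'rev-2'])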
1379
1380
    @needs_read_lock
1381
    def get_revision_reconcile(self, revision_id):
1382
        """'reconcile' helper routine that allows access to a revision always.
1383
1384
        This variant of get_revision does not cross check the weave graph
1385
        against the revision one as get_revision does: but it should only
1386
        be used by reconcile, or reconcile-alike commands that are correcting
1387
        or testing the revision graph.
1388
        """
1389
        return self._get_revisions([revision_id])[0]
1390
1391
    @needs_read_lock
1392
    def get_revisions(self, revision_ids):
1393
        """Get many revisions at once.
1394
        
1395
        Repositories that need to check data on every revision read should 
1396
        subclass this method.
1397
        """
1398
        return self._get_revisions(revision_ids)
1399
1400
    @needs_read_lock
1401
    def _get_revisions(self, revision_ids):
1402
        """Core work logic to get many revisions without sanity checks."""
1403
        revs = {}
1404
        for revid, rev in self._iter_revisions(revision_ids):
1405
            if rev is None:
1406
                raise errors.NoSuchRevision(self, revid)
1407
            revs[revid] = rev
1408
        return [revs[revid] for revid in revision_ids]
1409
1410
    def _iter_revisions(self, revision_ids):
1411
        """Iterate over revision objects.
1412
1413
        :param revision_ids: An iterable of revisions to examine. None may be
1414
            passed to request all revisions known to the repository. Note that
1415
            not all repositories can find unreferenced revisions; for those
1416
            repositories only referenced ones will be returned.
1417
        :return: An iterator of (revid, revision) tuples. Absent revisions (
1418
            those asked for but not available) are returned as (revid, None).
1419
        """
1420
        if revision_ids is None:
1421
            revision_ids = self.all_revision_ids()
1422
        else:
1423
            for rev_id in revision_ids:
1424
                if not rev_id or not isinstance(rev_id, basestring):
1425
                    raise errors.InvalidRevisionId(revision_id=rev_id, branch=self)
1426
        keys = [(key,) for key in revision_ids]
1427
        stream = self.revisions.get_record_stream(keys, 'unordered', True)
1428
        for record in stream:
1429
            revid = record.key[0]
1430
            if record.storage_kind == 'absent':
1431
                yield (revid, None)
1432
            else:
1433
                text = record.get_bytes_as('fulltext')
1434
                rev = self._serializer.read_revision_from_string(text)
1435
                yield (revid, rev)

    @needs_write_lock
    def add_signature_text(self, revision_id, signature):
        """Store a signature text for a revision.

        :param revision_id: Revision id of the revision
        :param signature: Signature text.
        """
        self.signatures.add_lines((revision_id,), (),
            osutils.split_lines(signature))
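
    # Example (hypothetical usage; ``signature_bytes`` is assumed): storing a
    # detached signature for an existing revision inside a write group, then
    # reading it back.
    #
    #   repo.lock_write()
    #   repo.start_write_group()
    #   repo.add_signature_text('rev-1', signature_bytes)
    #   repo.commit_write_group()
    #   repo.unlock()
    #   sig = repo.get_signature_text('rev-1')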

    def find_text_key_references(self):
        """Find the text key references within the repository.

        :return: A dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. The inventory texts from all present
            revision ids are assessed to generate this report.
        """
        revision_keys = self.revisions.keys()
        w = self.inventories
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._serializer._find_text_key_references(
                w.iter_lines_added_or_present_in_keys(revision_keys, pb=pb))
        finally:
            pb.finished()

    def _inventory_xml_lines_for_keys(self, keys):
        """Get a line iterator of the sort needed for finding references.

        Not relevant for non-xml inventory repositories.

        Ghosts in keys are ignored.

        :param keys: The revision keys for the inventories to inspect.
        :return: An iterator over (inventory line, revid) for the fulltexts of
            all of the xml inventories specified by keys.
        """
        stream = self.inventories.get_record_stream(keys, 'unordered', True)
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                revid = record.key[-1]
                lines = osutils.chunks_to_lines(chunks)
                for line in lines:
                    yield line, revid

    def _find_file_ids_from_xml_inventory_lines(self, line_iterator,
        revision_keys):
        """Helper routine for fileids_altered_by_revision_ids.

        This performs the translation of xml lines to revision ids.

        :param line_iterator: An iterator of (line, origin_version_id) tuples.
        :param revision_keys: The revision keys to filter for. This should be a
            set or other type which supports efficient __contains__ lookups, as
            the revision key from each parsed line will be looked up in the
            revision_keys filter.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        seen = set(self._serializer._find_text_key_references(
                line_iterator).iterkeys())
        parent_keys = self._find_parent_keys_of_revisions(revision_keys)
        parent_seen = set(self._serializer._find_text_key_references(
            self._inventory_xml_lines_for_keys(parent_keys)))
        new_keys = seen - parent_seen
        result = {}
        setdefault = result.setdefault
        for key in new_keys:
            setdefault(key[0], set()).add(key[-1])
        return result

    def _find_parent_keys_of_revisions(self, revision_keys):
        """Similar to _find_parent_ids_of_revisions, but used with keys.

        :param revision_keys: An iterable of revision keys.
        :return: The parents of all revision_keys that are not already in
            revision_keys.
        """
        parent_map = self.revisions.get_parent_map(revision_keys)
        parent_keys = set()
        map(parent_keys.update, parent_map.itervalues())
        parent_keys.difference_update(revision_keys)
        parent_keys.discard(_mod_revision.NULL_REVISION)
        return parent_keys

    def fileids_altered_by_revision_ids(self, revision_ids, _inv_weave=None):
        """Find the file ids and versions affected by revisions.

        :param revision_ids: an iterable containing revision ids.
        :param _inv_weave: The inventory weave from this repository or None.
            If None, the inventory weave will be opened automatically.
        :return: a dictionary mapping altered file-ids to an iterable of
            revision_ids. Each altered file-id has the exact revision_ids that
            altered it listed explicitly.
        """
        selected_keys = set((revid,) for revid in revision_ids)
        w = _inv_weave or self.inventories
        return self._find_file_ids_from_xml_inventory_lines(
            w.iter_lines_added_or_present_in_keys(
                selected_keys, pb=None),
            selected_keys)
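
    # Example (hypothetical ids): map each file id touched by the named
    # revisions to exactly the revisions that touched it.
    #
    #   altered = repo.fileids_altered_by_revision_ids(['rev-1', 'rev-2'])
    #   # e.g. -> {'file-a-id': set(['rev-1']),
    #   #          'file-b-id': set(['rev-1', 'rev-2'])}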

    def iter_files_bytes(self, desired_files):
        """Iterate through file versions.

        Files will not necessarily be returned in the order they occur in
        desired_files.  No specific order is guaranteed.

        Yields pairs of identifier, bytes_iterator.  identifier is an opaque
        value supplied by the caller as part of desired_files.  It should
        uniquely identify the file version in the caller's context.  (Examples:
        an index number or a TreeTransform trans_id.)

        bytes_iterator is an iterable of bytestrings for the file.  The
        kind of iterable and length of the bytestrings are unspecified, but for
        this implementation, it is a list of bytes produced by
        VersionedFile.get_record_stream().

        :param desired_files: a list of (file_id, revision_id, identifier)
            triples
        """
        text_keys = {}
        for file_id, revision_id, callable_data in desired_files:
            text_keys[(file_id, revision_id)] = callable_data
        for record in self.texts.get_record_stream(text_keys, 'unordered', True):
            if record.storage_kind == 'absent':
                raise errors.RevisionNotPresent(record.key[1], record.key[0])
            yield text_keys[record.key], record.get_bytes_as('chunked')
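
    # Example (hypothetical ids): the identifier is echoed back with each
    # result, so callers can match texts up despite the undefined ordering.
    #
    #   wanted = [('file-a-id', 'rev-1', 'a'), ('file-b-id', 'rev-2', 'b')]
    #   for identifier, chunks in repo.iter_files_bytes(wanted):
    #       text = ''.join(chunks)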

    def _generate_text_key_index(self, text_key_references=None,
        ancestors=None):
        """Generate a new text key index for the repository.

        This is an expensive function that will take considerable time to run.

        :return: A dict mapping text keys ((file_id, revision_id) tuples) to a
            list of parents, also text keys. When a given key has no parents,
            the parents list will be [NULL_REVISION].
        """
        # All revisions, to find inventory parents.
        if ancestors is None:
            graph = self.get_graph()
            ancestors = graph.get_parent_map(self.all_revision_ids())
        if text_key_references is None:
            text_key_references = self.find_text_key_references()
        pb = ui.ui_factory.nested_progress_bar()
        try:
            return self._do_generate_text_key_index(ancestors,
                text_key_references, pb)
        finally:
            pb.finished()

    def _do_generate_text_key_index(self, ancestors, text_key_references, pb):
        """Helper for _generate_text_key_index to avoid deep nesting."""
        revision_order = tsort.topo_sort(ancestors)
        invalid_keys = set()
        revision_keys = {}
        for revision_id in revision_order:
            revision_keys[revision_id] = set()
        text_count = len(text_key_references)
        # a cache of the text keys to allow reuse; costs a dict of all the
        # keys, but saves a 2-tuple for every child of a given key.
        text_key_cache = {}
        for text_key, valid in text_key_references.iteritems():
            if not valid:
                invalid_keys.add(text_key)
            else:
                revision_keys[text_key[1]].add(text_key)
            text_key_cache[text_key] = text_key
        del text_key_references
        text_index = {}
        text_graph = graph.Graph(graph.DictParentsProvider(text_index))
        NULL_REVISION = _mod_revision.NULL_REVISION
        # Set a cache with a size of 10 - this suffices for bzr.dev but may be
        # too small for large or very branchy trees. However, for 55K path
        # trees, it would be easy to use too much memory trivially. Ideally we
        # could gauge this by looking at available real memory etc, but this is
        # always a tricky proposition.
        inventory_cache = lru_cache.LRUCache(10)
        batch_size = 10  # should be ~150MB on a 55K path tree
        batch_count = len(revision_order) / batch_size + 1
        processed_texts = 0
        pb.update(gettext("Calculating text parents"), processed_texts, text_count)
        for offset in xrange(batch_count):
            to_query = revision_order[offset * batch_size:(offset + 1) *
                batch_size]
            if not to_query:
                break
            for revision_id in to_query:
                parent_ids = ancestors[revision_id]
                for text_key in revision_keys[revision_id]:
                    pb.update(gettext("Calculating text parents"), processed_texts)
                    processed_texts += 1
                    candidate_parents = []
                    for parent_id in parent_ids:
                        parent_text_key = (text_key[0], parent_id)
                        try:
                            check_parent = parent_text_key not in \
                                revision_keys[parent_id]
                        except KeyError:
                            # the parent parent_id is a ghost:
                            check_parent = False
                            # truncate the derived graph against this ghost.
                            parent_text_key = None
                        if check_parent:
                            # look at the parent commit details inventories to
                            # determine possible candidates in the per file graph.
                            # TODO: cache here.
                            try:
                                inv = inventory_cache[parent_id]
                            except KeyError:
                                inv = self.revision_tree(parent_id).root_inventory
                                inventory_cache[parent_id] = inv
                            try:
                                parent_entry = inv[text_key[0]]
                            except (KeyError, errors.NoSuchId):
                                parent_entry = None
                            if parent_entry is not None:
                                parent_text_key = (
                                    text_key[0], parent_entry.revision)
                            else:
                                parent_text_key = None
                        if parent_text_key is not None:
                            candidate_parents.append(
                                text_key_cache[parent_text_key])
                    parent_heads = text_graph.heads(candidate_parents)
                    new_parents = list(parent_heads)
                    new_parents.sort(key=lambda x: candidate_parents.index(x))
                    if new_parents == []:
                        new_parents = [NULL_REVISION]
                    text_index[text_key] = new_parents

        for text_key in invalid_keys:
            text_index[text_key] = [NULL_REVISION]
        return text_index

    def item_keys_introduced_by(self, revision_ids, _files_pb=None):
        """Get an iterable listing the keys of all the data introduced by a set
        of revision IDs.

        The keys will be ordered so that the corresponding items can be safely
        fetched and inserted in that order.

        :returns: An iterable producing tuples of (knit-kind, file-id,
            versions).  knit-kind is one of 'file', 'inventory', 'signatures',
            'revisions'.  file-id is None unless knit-kind is 'file'.
        """
        for result in self._find_file_keys_to_fetch(revision_ids, _files_pb):
            yield result
        del _files_pb
        for result in self._find_non_file_keys_to_fetch(revision_ids):
            yield result

    def _find_file_keys_to_fetch(self, revision_ids, pb):
        # XXX: it's a bit weird to control the inventory weave caching in this
        # generator.  Ideally the caching would be done in fetch.py I think.  Or
        # maybe this generator should explicitly have the contract that it
        # should not be iterated until the previously yielded item has been
        # processed?
        inv_w = self.inventories

        # file ids that changed
        file_ids = self.fileids_altered_by_revision_ids(revision_ids, inv_w)
        count = 0
        num_file_ids = len(file_ids)
        for file_id, altered_versions in file_ids.iteritems():
            if pb is not None:
                pb.update(gettext("Fetch texts"), count, num_file_ids)
            count += 1
            yield ("file", file_id, altered_versions)

    def _find_non_file_keys_to_fetch(self, revision_ids):
        # inventory
        yield ("inventory", None, revision_ids)

        # signatures
        # XXX: Note ATM no callers actually pay attention to this return
        #      instead they just use the list of revision ids and ignore
        #      missing sigs. Consider removing this work entirely
        revisions_with_signatures = set(self.signatures.get_parent_map(
            [(r,) for r in revision_ids]))
        revisions_with_signatures = {r for (r,) in revisions_with_signatures}
        revisions_with_signatures.intersection_update(revision_ids)
        yield ("signatures", None, revisions_with_signatures)

        # revisions
        yield ("revisions", None, revision_ids)

    @needs_read_lock
    def get_inventory(self, revision_id):
        """Get Inventory object by revision id."""
        return self.iter_inventories([revision_id]).next()

    def iter_inventories(self, revision_ids, ordering=None):
        """Get many inventories by revision_ids.

        This will buffer some or all of the texts used in constructing the
        inventories in memory, but will only parse a single inventory at a
        time.

        :param revision_ids: The expected revision ids of the inventories.
        :param ordering: optional ordering, e.g. 'topological'.  If not
            specified, the order of revision_ids will be preserved (by
            buffering if necessary).
        :return: An iterator of inventories.
        """
        if ((None in revision_ids)
            or (_mod_revision.NULL_REVISION in revision_ids)):
            raise ValueError('cannot get null revision inventory')
        for inv, revid in self._iter_inventories(revision_ids, ordering):
            if inv is None:
                raise errors.NoSuchRevision(self, revid)
            yield inv

    def _iter_inventories(self, revision_ids, ordering):
        """single-document based inventory iteration."""
        inv_xmls = self._iter_inventory_xmls(revision_ids, ordering)
        for text, revision_id in inv_xmls:
            if text is None:
                yield None, revision_id
            else:
                yield self._deserialise_inventory(revision_id, text), revision_id

    def _iter_inventory_xmls(self, revision_ids, ordering):
        if ordering is None:
            order_as_requested = True
            ordering = 'unordered'
        else:
            order_as_requested = False
        keys = [(revision_id,) for revision_id in revision_ids]
        if not keys:
            return
        if order_as_requested:
            key_iter = iter(keys)
            next_key = key_iter.next()
        stream = self.inventories.get_record_stream(keys, ordering, True)
        text_chunks = {}
        for record in stream:
            if record.storage_kind != 'absent':
                chunks = record.get_bytes_as('chunked')
                if order_as_requested:
                    text_chunks[record.key] = chunks
                else:
                    yield ''.join(chunks), record.key[-1]
            else:
                yield None, record.key[-1]
            if order_as_requested:
                # Yield as many results as we can while preserving order.
                while next_key in text_chunks:
                    chunks = text_chunks.pop(next_key)
                    yield ''.join(chunks), next_key[-1]
                    try:
                        next_key = key_iter.next()
                    except StopIteration:
                        # We still want to fully consume the get_record_stream,
                        # just in case it is not actually finished at this point
                        next_key = None
                        break

    def _deserialise_inventory(self, revision_id, xml):
        """Transform the xml into an inventory object.

        :param revision_id: The expected revision id of the inventory.
        :param xml: A serialised inventory.
        """
        result = self._serializer.read_inventory_from_string(xml, revision_id,
                    entry_cache=self._inventory_entry_cache,
                    return_from_cache=self._safe_to_return_from_cache)
        if result.revision_id != revision_id:
            raise AssertionError('revision id mismatch %s != %s' % (
                result.revision_id, revision_id))
        return result

    def get_serializer_format(self):
        return self._serializer.format_num

    @needs_read_lock
    def _get_inventory_xml(self, revision_id):
        """Get serialized inventory as a string."""
        texts = self._iter_inventory_xmls([revision_id], 'unordered')
        text, revision_id = texts.next()
        if text is None:
            raise errors.NoSuchRevision(self, revision_id)
        return text

    @needs_read_lock
    def revision_tree(self, revision_id):
        """Return Tree for a revision on this branch.

        `revision_id` may be NULL_REVISION for the empty tree revision.
        """
        revision_id = _mod_revision.ensure_null(revision_id)
        # TODO: refactor this to use an existing revision object
        # so we don't need to read it in twice.
        if revision_id == _mod_revision.NULL_REVISION:
            return InventoryRevisionTree(self,
                Inventory(root_id=None), _mod_revision.NULL_REVISION)
        else:
            inv = self.get_inventory(revision_id)
            return InventoryRevisionTree(self, inv, revision_id)
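
    # Example (hypothetical ids): reading one file text out of a stored
    # revision through its tree.
    #
    #   repo.lock_read()
    #   try:
    #       tree = repo.revision_tree('rev-1')
    #       text = tree.get_file_text('file-a-id')
    #   finally:
    #       repo.unlock()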

    def revision_trees(self, revision_ids):
        """Return Trees for revisions in this repository.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            yield InventoryRevisionTree(self, inv, inv.revision_id)

    def _filtered_revision_trees(self, revision_ids, file_ids):
        """Return Trees for revisions in this repository, with only some files.

        :param revision_ids: a sequence of revision-ids;
          a revision-id may not be None or 'null:'
        :param file_ids: if not None, the result is filtered
          so that only those file-ids, their parents and their
          children are included.
        """
        inventories = self.iter_inventories(revision_ids)
        for inv in inventories:
            # Should we introduce a FilteredRevisionTree class rather
            # than pre-filter the inventory here?
            filtered_inv = inv.filter(file_ids)
            yield InventoryRevisionTree(self, filtered_inv, filtered_inv.revision_id)

    def get_parent_map(self, revision_ids):
        """See graph.StackedParentsProvider.get_parent_map"""
        # revisions index works in keys; this just works in revisions
        # therefore wrap and unwrap
        query_keys = []
        result = {}
        for revision_id in revision_ids:
            if revision_id == _mod_revision.NULL_REVISION:
                result[revision_id] = ()
            elif revision_id is None:
                raise ValueError('get_parent_map(None) is not valid')
            else:
                query_keys.append((revision_id,))
        for ((revision_id,), parent_keys) in \
                self.revisions.get_parent_map(query_keys).iteritems():
            if parent_keys:
                result[revision_id] = tuple([parent_revid
                    for (parent_revid,) in parent_keys])
            else:
                result[revision_id] = (_mod_revision.NULL_REVISION,)
        return result

    @needs_read_lock
    def get_known_graph_ancestry(self, revision_ids):
        """Return the known graph for a set of revision ids and their ancestors.
        """
        st = static_tuple.StaticTuple
        revision_keys = [st(r_id).intern() for r_id in revision_ids]
        known_graph = self.revisions.get_known_graph_ancestry(revision_keys)
        return graph.GraphThunkIdsToKeys(known_graph)
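
    # Example (hypothetical ids): a root revision reports NULL_REVISION as
    # its only parent, and ids that are absent are simply left out.
    #
    #   repo.get_parent_map(['root-rev', 'rev-2', 'missing-rev'])
    #   # e.g. -> {'root-rev': ('null:',), 'rev-2': ('root-rev',)}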

    @needs_read_lock
    def get_file_graph(self):
        """Return the graph walker for text revisions."""
        return graph.Graph(self.texts)

    def revision_ids_to_search_result(self, result_set):
        """Convert a set of revision ids to a graph SearchResult."""
        result_parents = set()
        for parents in self.get_graph().get_parent_map(
            result_set).itervalues():
            result_parents.update(parents)
        included_keys = result_set.intersection(result_parents)
        start_keys = result_set.difference(included_keys)
        exclude_keys = result_parents.difference(result_set)
        result = vf_search.SearchResult(start_keys, exclude_keys,
            len(result_set), result_set)
        return result

    def _get_versioned_file_checker(self, text_key_references=None,
        ancestors=None):
        """Return an object suitable for checking versioned files.

        :param text_key_references: if non-None, an already built
            dictionary mapping text keys ((fileid, revision_id) tuples)
            to whether they were referred to by the inventory of the
            revision_id that they contain. If None, this will be
            calculated.
        :param ancestors: Optional result from
            self.get_graph().get_parent_map(self.all_revision_ids()) if already
            available.
        """
        return _VersionedFileChecker(self,
            text_key_references=text_key_references, ancestors=ancestors)

    @needs_read_lock
    def has_signature_for_revision_id(self, revision_id):
        """Query for a revision signature for revision_id in the repository."""
        if not self.has_revision(revision_id):
            raise errors.NoSuchRevision(self, revision_id)
        sig_present = (1 == len(
            self.signatures.get_parent_map([(revision_id,)])))
        return sig_present

    @needs_read_lock
    def get_signature_text(self, revision_id):
        """Return the text for a signature."""
        stream = self.signatures.get_record_stream([(revision_id,)],
            'unordered', True)
        record = stream.next()
        if record.storage_kind == 'absent':
            raise errors.NoSuchRevision(self, revision_id)
        return record.get_bytes_as('fulltext')

    @needs_read_lock
    def _check(self, revision_ids, callback_refs, check_repo):
        result = check.VersionedFileCheck(self, check_repo=check_repo)
        result.check(callback_refs)
        return result

    def _find_inconsistent_revision_parents(self, revisions_iterator=None):
        """Find revisions with different parent lists in the revision object
        and in the index graph.

        :param revisions_iterator: None, or an iterator of (revid,
            Revision-or-None). This iterator controls the revisions checked.
        :returns: an iterator yielding tuples of (revision-id, parents-in-index,
            parents-in-revision).
        """
        if not self.is_locked():
            raise AssertionError()
        vf = self.revisions
        if revisions_iterator is None:
            revisions_iterator = self._iter_revisions(None)
        for revid, revision in revisions_iterator:
            if revision is None:
                continue
            parent_map = vf.get_parent_map([(revid,)])
            parents_according_to_index = tuple(parent[-1] for parent in
                parent_map[(revid,)])
            parents_according_to_revision = tuple(revision.parent_ids)
            if parents_according_to_index != parents_according_to_revision:
                yield (revid, parents_according_to_index,
                    parents_according_to_revision)

    def _check_for_inconsistent_revision_parents(self):
        inconsistencies = list(self._find_inconsistent_revision_parents())
        if inconsistencies:
            raise errors.BzrCheckError(
                "Revision knit has inconsistent parents.")

    def _get_sink(self):
        """Return a sink for streaming into this repository."""
        return StreamSink(self)

    def _get_source(self, to_format):
        """Return a source for streaming from this repository."""
        return StreamSource(self, to_format)


class MetaDirVersionedFileRepository(MetaDirRepository,
                                     VersionedFileRepository):
    """Repositories in a meta-dir, that work via versioned file objects."""

    def __init__(self, _format, a_bzrdir, control_files):
        super(MetaDirVersionedFileRepository, self).__init__(_format, a_bzrdir,
            control_files)


class MetaDirVersionedFileRepositoryFormat(RepositoryFormatMetaDir,
        VersionedFileRepositoryFormat):
    """Base class for repository formats using versioned files in metadirs."""


class StreamSink(object):
    """An object that can insert a stream into a repository.

    This interface handles the complexity of reserialising inventories and
    revisions from different formats, and allows unidirectional insertion into
    stacked repositories without looking for the missing basis parents
    beforehand.
    """

    def __init__(self, target_repo):
        self.target_repo = target_repo
2022
2023
    def insert_stream(self, stream, src_format, resume_tokens):
2024
        """Insert a stream's content into the target repository.
2025
2026
        :param src_format: a bzr repository format.
2027
2028
        :return: a list of resume tokens and an  iterable of keys additional
2029
            items required before the insertion can be completed.
2030
        """
2031
        self.target_repo.lock_write()
2032
        try:
2033
            if resume_tokens:
2034
                self.target_repo.resume_write_group(resume_tokens)
2035
                is_resume = True
2036
            else:
2037
                self.target_repo.start_write_group()
2038
                is_resume = False
2039
            try:
2040
                # locked_insert_stream performs a commit|suspend.
2041
                missing_keys = self.insert_stream_without_locking(stream,
2042
                                    src_format, is_resume)
2043
                if missing_keys:
2044
                    # suspend the write group and tell the caller what we is
2045
                    # missing. We know we can suspend or else we would not have
2046
                    # entered this code path. (All repositories that can handle
2047
                    # missing keys can handle suspending a write group).
2048
                    write_group_tokens = self.target_repo.suspend_write_group()
2049
                    return write_group_tokens, missing_keys
2050
                hint = self.target_repo.commit_write_group()
2051
                to_serializer = self.target_repo._format._serializer
2052
                src_serializer = src_format._serializer
2053
                if (to_serializer != src_serializer and
2054
                    self.target_repo._format.pack_compresses):
2055
                    self.target_repo.pack(hint=hint)
2056
                return [], set()
2057
            except:
2058
                self.target_repo.abort_write_group(suppress_errors=True)
2059
                raise
2060
        finally:
2061
            self.target_repo.unlock()
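
    # A hedged sketch (illustrative only, not bzrlib API) of the resume-token
    # protocol implemented above: a caller inserts a stream, and while the
    # sink reports missing keys it fetches just those keys from a matching
    # StreamSource and resumes with the returned tokens.  push_stream is a
    # hypothetical helper.
    #
    #     def push_stream(sink, source, stream, src_format):
    #         tokens, missing_keys = sink.insert_stream(stream, src_format, [])
    #         while missing_keys:
    #             fill = source.get_stream_for_missing_keys(missing_keys)
    #             tokens, missing_keys = sink.insert_stream(
    #                 fill, src_format, tokens)
    #         return tokens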

    def insert_stream_without_locking(self, stream, src_format,
                                      is_resume=False):
        """Insert a stream's content into the target repository.

        This assumes that you already have a locked repository and an active
        write group.

        :param src_format: a bzr repository format.
        :param is_resume: Passed down to get_missing_parent_inventories to
            indicate if we should be checking for missing texts at the same
            time.

        :return: A set of keys that are missing.
        """
        if not self.target_repo.is_write_locked():
            raise errors.ObjectNotLocked(self)
        if not self.target_repo.is_in_write_group():
            raise errors.BzrError('you must already be in a write group')
        to_serializer = self.target_repo._format._serializer
        src_serializer = src_format._serializer
        new_pack = None
        if to_serializer == src_serializer:
            # If serializers match and the target is a pack repository, set the
            # write cache size on the new pack.  This avoids poor performance
            # on transports where append is unbuffered (such as
            # RemoteTransport).  This is safe to do because nothing should read
            # back from the target repository while a stream with matching
            # serialization is being inserted.
            # The exception is that a delta record from the source that should
            # be a fulltext may need to be expanded by the target (see
            # test_fetch_revisions_with_deltas_into_pack); but we take care to
            # explicitly flush any buffered writes first in that rare case.
            try:
                new_pack = self.target_repo._pack_collection._new_pack
            except AttributeError:
                # Not a pack repository
                pass
            else:
                new_pack.set_write_cache_size(1024*1024)
        for substream_type, substream in stream:
            if 'stream' in debug.debug_flags:
                mutter('inserting substream: %s', substream_type)
            if substream_type == 'texts':
                self.target_repo.texts.insert_record_stream(substream)
            elif substream_type == 'inventories':
                if src_serializer == to_serializer:
                    self.target_repo.inventories.insert_record_stream(
                        substream)
                else:
                    self._extract_and_insert_inventories(
                        substream, src_serializer)
            elif substream_type == 'inventory-deltas':
                self._extract_and_insert_inventory_deltas(
                    substream, src_serializer)
            elif substream_type == 'chk_bytes':
                # XXX: This doesn't support conversions, as it assumes the
                #      conversion was done in the fetch code.
                self.target_repo.chk_bytes.insert_record_stream(substream)
            elif substream_type == 'revisions':
                # This may fall back to extract-and-insert more often than
                # required if the serializers are different only in terms of
                # the inventory.
                if src_serializer == to_serializer:
                    self.target_repo.revisions.insert_record_stream(substream)
                else:
                    self._extract_and_insert_revisions(substream,
                        src_serializer)
            elif substream_type == 'signatures':
                self.target_repo.signatures.insert_record_stream(substream)
            else:
                raise AssertionError('kaboom! %s' % (substream_type,))
        # Done inserting data, and the missing_keys calculations will try to
        # read back from the inserted data, so flush the writes to the new pack
        # (if this is pack format).
        if new_pack is not None:
            new_pack._write_data('', flush=True)
        # Find all the new revisions (including ones from resume_tokens)
        missing_keys = self.target_repo.get_missing_parent_inventories(
            check_for_missing_texts=is_resume)
        try:
            for prefix, versioned_file in (
                ('texts', self.target_repo.texts),
                ('inventories', self.target_repo.inventories),
                ('revisions', self.target_repo.revisions),
                ('signatures', self.target_repo.signatures),
                ('chk_bytes', self.target_repo.chk_bytes),
                ):
                if versioned_file is None:
                    continue
                # TODO: key is often going to be a StaticTuple object
                #       I don't believe we can define a method by which
                #       (prefix,) + StaticTuple will work, though we could
                #       define a StaticTuple.sq_concat that would allow you to
                #       pass in either a tuple or a StaticTuple as the second
                #       object, so instead we could have:
                #       StaticTuple(prefix) + key here...
                missing_keys.update((prefix,) + key for key in
                    versioned_file.get_missing_compression_parent_keys())
        except NotImplementedError:
            # cannot even attempt suspending, and missing would have failed
            # during stream insertion.
            missing_keys = set()
        return missing_keys
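
    # A hedged note on the stream shape consumed above: `stream` is an
    # iterable of (substream_type, records) pairs, where substream_type is
    # one of 'texts', 'inventories', 'inventory-deltas', 'chk_bytes',
    # 'revisions' or 'signatures'.  For example (names illustrative only):
    #
    #     stream = [
    #         ('texts', text_record_stream),
    #         ('inventories', inventory_record_stream),
    #         ('revisions', revision_record_stream),
    #     ]
    #     missing = sink.insert_stream_without_locking(stream, src_format)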

    def _extract_and_insert_inventory_deltas(self, substream, serializer):
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # Insert the delta directly
            inventory_delta_bytes = record.get_bytes_as('fulltext')
            deserialiser = inventory_delta.InventoryDeltaDeserializer()
            try:
                parse_result = deserialiser.parse_text_bytes(
                    inventory_delta_bytes)
            except inventory_delta.IncompatibleInventoryDelta as err:
                mutter("Incompatible delta: %s", err.msg)
                raise errors.IncompatibleRevision(self.target_repo._format)
            basis_id, new_id, rich_root, tree_refs, inv_delta = parse_result
            revision_id = new_id
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory_by_delta(
                basis_id, inv_delta, revision_id, parents)

    def _extract_and_insert_inventories(self, substream, serializer,
            parse_delta=None):
        """Generate a new inventory versionedfile in target, converting data.

        The inventory is retrieved from the source (deserializing it), and
        stored in the target (reserializing it in a different format).
        """
        target_rich_root = self.target_repo._format.rich_root_data
        target_tree_refs = self.target_repo._format.supports_tree_reference
        for record in substream:
            # It's not a delta, so it must be a fulltext in the source
            # serializer's format.
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            inv = serializer.read_inventory_from_string(bytes, revision_id)
            parents = [key[0] for key in record.parents]
            self.target_repo.add_inventory(revision_id, inv, parents)
            # No need to keep holding this full inv in memory when the rest of
            # the substream is likely to be all deltas.
            del inv

    def _extract_and_insert_revisions(self, substream, serializer):
        for record in substream:
            bytes = record.get_bytes_as('fulltext')
            revision_id = record.key[0]
            rev = serializer.read_revision_from_string(bytes)
            if rev.revision_id != revision_id:
                raise AssertionError(
                    'revision id mismatch: %s != %s' % (rev, revision_id))
            self.target_repo.add_revision(revision_id, rev)

    def finished(self):
        if self.target_repo._format._fetch_reconcile:
            self.target_repo.reconcile()


class StreamSource(object):
    """A source of a stream for fetching between repositories."""

    def __init__(self, from_repository, to_format):
        """Create a StreamSource streaming from from_repository."""
        self.from_repository = from_repository
        self.to_format = to_format
        self._record_counter = RecordCounter()

    def delta_on_metadata(self):
        """Return True if deltas are permitted on metadata streams.

        That is, on revisions and signatures.
        """
        src_serializer = self.from_repository._format._serializer
        target_serializer = self.to_format._serializer
        return (self.to_format._fetch_uses_deltas and
            src_serializer == target_serializer)

    def _fetch_revision_texts(self, revs):
        # fetch signatures first and then the revision texts
        # may need to be an InterRevisionStore call here.
        from_sf = self.from_repository.signatures
        # A missing signature is just skipped.
        keys = [(rev_id,) for rev_id in revs]
        signatures = versionedfile.filter_absent(from_sf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.to_format._fetch_uses_deltas))
        # If a revision has a delta, this is actually expanded inside the
        # insert_record_stream code now, which is an alternate fix for
        # bug #261339
        from_rf = self.from_repository.revisions
        revisions = from_rf.get_record_stream(
            keys,
            self.to_format._fetch_order,
            not self.delta_on_metadata())
        return [('signatures', signatures), ('revisions', revisions)]

    def _generate_root_texts(self, revs):
        """This will be called by get_stream between fetching weave texts and
        fetching the inventory weave.
        """
        if self._rich_root_upgrade():
            return _mod_fetch.Inter1and2Helper(
                self.from_repository).generate_root_texts(revs)
        else:
            return []

    def get_stream(self, search):
        phase = 'file'
        revs = search.get_keys()
        graph = self.from_repository.get_graph()
        revs = tsort.topo_sort(graph.get_parent_map(revs))
        data_to_fetch = self.from_repository.item_keys_introduced_by(revs)
        text_keys = []
        for knit_kind, file_id, revisions in data_to_fetch:
            if knit_kind != phase:
                phase = knit_kind
                # Make a new progress bar for this phase
            if knit_kind == "file":
                # Accumulate file texts
                text_keys.extend([(file_id, revision) for revision in
                    revisions])
            elif knit_kind == "inventory":
                # Now copy the file texts.
                from_texts = self.from_repository.texts
                yield ('texts', from_texts.get_record_stream(
                    text_keys, self.to_format._fetch_order,
                    not self.to_format._fetch_uses_deltas))
                # Cause an error if a text occurs after we have done the
                # copy.
                text_keys = None
                # Before we process the inventory we generate the root
                # texts (if necessary) so that the inventory references
                # will be valid.
                for _ in self._generate_root_texts(revs):
                    yield _
                # we fetch only the referenced inventories because we do not
                # know for unselected inventories whether all their required
                # texts are present in the other repository - it could be
                # corrupt.
                for info in self._get_inventory_stream(revs):
                    yield info
            elif knit_kind == "signatures":
                # Nothing to do here; this will be taken care of when
                # _fetch_revision_texts happens.
                pass
            elif knit_kind == "revisions":
                for record in self._fetch_revision_texts(revs):
                    yield record
            else:
                raise AssertionError("Unknown knit kind %r" % knit_kind)

    def get_stream_for_missing_keys(self, missing_keys):
        # missing keys can only occur when we are byte copying and not
        # translating (because translation means we don't send
        # unreconstructable deltas ever).
        keys = {}
        keys['texts'] = set()
        keys['revisions'] = set()
        keys['inventories'] = set()
        keys['chk_bytes'] = set()
        keys['signatures'] = set()
        for key in missing_keys:
            keys[key[0]].add(key[1:])
        if len(keys['revisions']):
            # If we allowed copying revisions at this point, we could end up
            # copying a revision without copying its required texts: a
            # violation of the requirements for repository integrity.
            raise AssertionError(
                'cannot copy revisions to fill in missing deltas %s' % (
                    keys['revisions'],))
        for substream_kind, keys in keys.iteritems():
            vf = getattr(self.from_repository, substream_kind)
            if vf is None and keys:
                raise AssertionError(
                    "cannot fill in keys for a versioned file we don't"
                    " have: %s needs %s" % (substream_kind, keys))
            if not keys:
                # No need to stream something we don't have
                continue
            if substream_kind == 'inventories':
                # Some missing keys are genuinely ghosts, filter those out.
                present = self.from_repository.inventories.get_parent_map(keys)
                revs = [key[0] for key in present]
                # Get the inventory stream more-or-less as we do for the
                # original stream; there's no reason to assume that records
                # direct from the source will be suitable for the sink.  (Think
                # e.g. 2a -> 1.9-rich-root).
                for info in self._get_inventory_stream(revs, missing=True):
                    yield info
                continue

            # Ask for full texts always so that we don't need more round trips
            # after this stream.
            # Some of the missing keys are genuinely ghosts, so filter absent
            # records. The Sink is responsible for doing another check to
            # ensure that ghosts don't introduce missing data for future
            # fetches.
            stream = versionedfile.filter_absent(vf.get_record_stream(keys,
                self.to_format._fetch_order, True))
            yield substream_kind, stream

    def inventory_fetch_order(self):
        if self._rich_root_upgrade():
            return 'topological'
        else:
            return self.to_format._fetch_order

    def _rich_root_upgrade(self):
        return (not self.from_repository._format.rich_root_data and
            self.to_format.rich_root_data)

    def _get_inventory_stream(self, revision_ids, missing=False):
        from_format = self.from_repository._format
        if (from_format.supports_chks and self.to_format.supports_chks and
            from_format.network_name() == self.to_format.network_name()):
            raise AssertionError(
                "this case should be handled by GroupCHKStreamSource")
        elif 'forceinvdeltas' in debug.debug_flags:
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)
        elif from_format.network_name() == self.to_format.network_name():
            # Same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        elif (not from_format.supports_chks and not self.to_format.supports_chks
                and from_format._serializer == self.to_format._serializer):
            # Essentially the same format.
            return self._get_simple_inventory_stream(revision_ids,
                    missing=missing)
        else:
            # Any time we switch serializations, we want to use an
            # inventory-delta based approach.
            return self._get_convertable_inventory_stream(revision_ids,
                    delta_versus_null=missing)

    def _get_simple_inventory_stream(self, revision_ids, missing=False):
        # NB: This currently reopens the inventory weave in source;
        # using a single stream interface instead would avoid this.
        from_weave = self.from_repository.inventories
        if missing:
            delta_closure = True
        else:
            delta_closure = not self.delta_on_metadata()
        yield ('inventories', from_weave.get_record_stream(
            [(rev_id,) for rev_id in revision_ids],
            self.inventory_fetch_order(), delta_closure))

    def _get_convertable_inventory_stream(self, revision_ids,
                                          delta_versus_null=False):
        # The two formats are sufficiently different that there is no fast
        # path, so we need to send just inventory deltas, which any
        # sufficiently modern client can insert into any repository.
        # The StreamSink code expects to be able to
        # convert on the target, so we need to put bytes-on-the-wire that can
        # be converted.  That means inventory deltas (if the remote is <1.19,
        # RemoteStreamSink will fall back to VFS to insert the deltas).
        yield ('inventory-deltas',
           self._stream_invs_as_deltas(revision_ids,
                                       delta_versus_null=delta_versus_null))

    def _stream_invs_as_deltas(self, revision_ids, delta_versus_null=False):
        """Return a stream of inventory-deltas for the given rev ids.

        :param revision_ids: The list of inventories to transmit
        :param delta_versus_null: Don't try to find a minimal delta for this
            entry, instead compute the delta versus the NULL_REVISION. This
            effectively streams a complete inventory. Used for operations
            such as filling in missing parents.
        """
        from_repo = self.from_repository
        revision_keys = [(rev_id,) for rev_id in revision_ids]
        parent_map = from_repo.inventories.get_parent_map(revision_keys)
        # XXX: possibly repos could implement a more efficient iter_inv_deltas
        # method...
        inventories = self.from_repository.iter_inventories(
            revision_ids, 'topological')
        format = from_repo._format
        invs_sent_so_far = {_mod_revision.NULL_REVISION}
        inventory_cache = lru_cache.LRUCache(50)
        null_inventory = from_repo.revision_tree(
            _mod_revision.NULL_REVISION).root_inventory
        # XXX: ideally the rich-root/tree-refs flags would be per-revision, not
        # per-repo (e.g.  streaming a non-rich-root revision out of a rich-root
        # repo back into a non-rich-root repo ought to be allowed)
        serializer = inventory_delta.InventoryDeltaSerializer(
            versioned_root=format.rich_root_data,
            tree_references=format.supports_tree_reference)
        for inv in inventories:
            key = (inv.revision_id,)
            parent_keys = parent_map.get(key, ())
            delta = None
            if not delta_versus_null and parent_keys:
                # The caller did not ask for complete inventories and we have
                # some parents that we can delta against.  Make a delta against
                # each parent so that we can find the smallest.
                parent_ids = [parent_key[0] for parent_key in parent_keys]
                for parent_id in parent_ids:
                    if parent_id not in invs_sent_so_far:
                        # We don't know that the remote side has this basis, so
                        # we can't use it.
                        continue
                    if parent_id == _mod_revision.NULL_REVISION:
                        parent_inv = null_inventory
                    else:
                        parent_inv = inventory_cache.get(parent_id, None)
                        if parent_inv is None:
                            parent_inv = from_repo.get_inventory(parent_id)
                    candidate_delta = inv._make_delta(parent_inv)
                    if (delta is None or
                        len(delta) > len(candidate_delta)):
                        delta = candidate_delta
                        basis_id = parent_id
            if delta is None:
                # Either none of the parents ended up being suitable, or we
                # were asked to delta against NULL
                basis_id = _mod_revision.NULL_REVISION
                delta = inv._make_delta(null_inventory)
            invs_sent_so_far.add(inv.revision_id)
            inventory_cache[inv.revision_id] = inv
            delta_serialized = ''.join(
                serializer.delta_to_lines(basis_id, key[-1], delta))
            yield versionedfile.FulltextContentFactory(
                key, parent_keys, None, delta_serialized)


class _VersionedFileChecker(object):

    def __init__(self, repository, text_key_references=None, ancestors=None):
        self.repository = repository
        self.text_index = self.repository._generate_text_key_index(
            text_key_references=text_key_references, ancestors=ancestors)

    def calculate_file_version_parents(self, text_key):
        """Calculate the correct parents for a file version according to
        the inventories.
        """
        parent_keys = self.text_index[text_key]
        if parent_keys == [_mod_revision.NULL_REVISION]:
            return ()
        return tuple(parent_keys)

    def check_file_version_parents(self, texts, progress_bar=None):
        """Check the parents stored in a versioned file are correct.

        It also detects file versions that are not referenced by their
        corresponding revision's inventory.

        :returns: A tuple of (wrong_parents, dangling_file_versions).
            wrong_parents is a dict mapping {revision_id: (stored_parents,
            correct_parents)} for each revision_id where the stored parents
            are not correct.  dangling_file_versions is a set of (file_id,
            revision_id) tuples for versions that are present in this versioned
            file, but not used by the corresponding inventory.
        """
        local_progress = None
        if progress_bar is None:
            local_progress = ui.ui_factory.nested_progress_bar()
            progress_bar = local_progress
        try:
            return self._check_file_version_parents(texts, progress_bar)
        finally:
            if local_progress:
                local_progress.finished()

    def _check_file_version_parents(self, texts, progress_bar):
        """See check_file_version_parents."""
        wrong_parents = {}
        self.file_ids = {file_id for file_id, _ in
            self.text_index.iterkeys()}
        # text keys are now grouped by file_id
        n_versions = len(self.text_index)
        progress_bar.update(gettext('loading text store'), 0, n_versions)
        parent_map = self.repository.texts.get_parent_map(self.text_index)
        # On unlistable transports this could well be empty/error...
        text_keys = self.repository.texts.keys()
        unused_keys = frozenset(text_keys) - set(self.text_index)
        for num, key in enumerate(self.text_index.iterkeys()):
            progress_bar.update(gettext('checking text graph'), num, n_versions)
            correct_parents = self.calculate_file_version_parents(key)
            try:
                knit_parents = parent_map[key]
            except errors.RevisionNotPresent:
                # Missing text!
                knit_parents = None
            if correct_parents != knit_parents:
                wrong_parents[key] = (knit_parents, correct_parents)
        return wrong_parents, unused_keys
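
    # A hedged usage sketch (illustrative only): checking the text graph of
    # a locked repository `repo`.
    #
    #     checker = _VersionedFileChecker(repo)
    #     wrong_parents, dangling = checker.check_file_version_parents(
    #         repo.texts)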


class InterVersionedFileRepository(InterRepository):

    _walk_to_common_revisions_batch_size = 50

    supports_fetch_spec = True

    @needs_write_lock
    def fetch(self, revision_id=None, find_ghosts=False,
            fetch_spec=None):
        """Fetch the content required to construct revision_id.

        The content is copied from self.source to self.target.

        :param revision_id: if None all content is copied, if NULL_REVISION no
                            content is copied.
        :return: None.
        """
        if self.target._format.experimental:
            ui.ui_factory.show_user_warning('experimental_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        from bzrlib.fetch import RepoFetcher
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        f = RepoFetcher(to_repository=self.target,
                               from_repository=self.source,
                               last_revision=revision_id,
                               fetch_spec=fetch_spec,
                               find_ghosts=find_ghosts)

    def _walk_to_common_revisions(self, revision_ids, if_present_ids=None):
        """Walk out from revision_ids in source to revisions target has.

        :param revision_ids: The start point for the search.
        :return: A SearchResult covering the revisions missing from target.
        """
        target_graph = self.target.get_graph()
        revision_ids = frozenset(revision_ids)
        if if_present_ids:
            all_wanted_revs = revision_ids.union(if_present_ids)
        else:
            all_wanted_revs = revision_ids
        missing_revs = set()
        source_graph = self.source.get_graph()
        # ensure we don't pay silly lookup costs.
        searcher = source_graph._make_breadth_first_searcher(all_wanted_revs)
        null_set = frozenset([_mod_revision.NULL_REVISION])
        searcher_exhausted = False
        while True:
            next_revs = set()
            ghosts = set()
            # Iterate the searcher until we have enough next_revs
            while len(next_revs) < self._walk_to_common_revisions_batch_size:
                try:
                    next_revs_part, ghosts_part = searcher.next_with_ghosts()
                    next_revs.update(next_revs_part)
                    ghosts.update(ghosts_part)
                except StopIteration:
                    searcher_exhausted = True
                    break
            # If there are ghosts in the source graph, and the caller asked for
            # them, make sure that they are present in the target.
            # We don't care about other ghosts as we can't fetch them and
            # haven't been asked to.
            ghosts_to_check = set(revision_ids.intersection(ghosts))
            revs_to_get = set(next_revs).union(ghosts_to_check)
            if revs_to_get:
                have_revs = set(target_graph.get_parent_map(revs_to_get))
                # we always have NULL_REVISION present.
                have_revs = have_revs.union(null_set)
                # Check if the target is missing any ghosts we need.
                ghosts_to_check.difference_update(have_revs)
                if ghosts_to_check:
                    # One of the caller's revision_ids is a ghost in both the
                    # source and the target.
                    raise errors.NoSuchRevision(
                        self.source, ghosts_to_check.pop())
                missing_revs.update(next_revs - have_revs)
                # Because we may have walked past the original stop point, make
                # sure everything is stopped
                stop_revs = searcher.find_seen_ancestors(have_revs)
                searcher.stop_searching_any(stop_revs)
            if searcher_exhausted:
                break
        (started_keys, excludes, included_keys) = searcher.get_state()
        return vf_search.SearchResult(started_keys, excludes,
            len(included_keys), included_keys)

    @needs_read_lock
    def search_missing_revision_ids(self,
            revision_id=symbol_versioning.DEPRECATED_PARAMETER,
            find_ghosts=True, revision_ids=None, if_present_ids=None,
            limit=None):
        """Return the revision ids that source has that target does not.

        :param revision_id: only return revision ids included by this
            revision_id.
        :param revision_ids: return revision ids included by these
            revision_ids.  NoSuchRevision will be raised if any of these
            revisions are not present.
        :param if_present_ids: like revision_ids, but will not cause
            NoSuchRevision if any of these are absent, instead they will simply
            not be in the result.  This is useful for e.g. finding revisions
            to fetch for tags, which may reference absent revisions.
        :param find_ghosts: If True find missing revisions in deep history
            rather than just finding the surface difference.
        :param limit: Maximum number of revisions to return, topologically
            ordered.
        :return: A bzrlib.graph.SearchResult.
        """
        if symbol_versioning.deprecated_passed(revision_id):
            symbol_versioning.warn(
                'search_missing_revision_ids(revision_id=...) was '
                'deprecated in 2.4.  Use revision_ids=[...] instead.',
                DeprecationWarning, stacklevel=2)
            if revision_ids is not None:
                raise AssertionError(
                    'revision_ids is mutually exclusive with revision_id')
            if revision_id is not None:
                revision_ids = [revision_id]
        del revision_id
        # stop searching at found target revisions.
        if not find_ghosts and (revision_ids is not None or if_present_ids is
                not None):
            result = self._walk_to_common_revisions(revision_ids,
                    if_present_ids=if_present_ids)
            if limit is None:
                return result
            result_set = result.get_keys()
        else:
            # generic, possibly worst case, slow code path.
            target_ids = set(self.target.all_revision_ids())
            source_ids = self._present_source_revisions_for(
                revision_ids, if_present_ids)
            result_set = set(source_ids).difference(target_ids)
        if limit is not None:
            topo_ordered = self.source.get_graph().iter_topo_order(result_set)
            result_set = set(itertools.islice(topo_ordered, limit))
        return self.source.revision_ids_to_search_result(result_set)
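
    # A hedged usage sketch (illustrative only; tip_revision_id is a
    # hypothetical revision id): find up to 100 revisions present in
    # `source` but not `target`, then fetch exactly those.
    #
    #     inter = InterRepository.get(source, target)
    #     search = inter.search_missing_revision_ids(
    #         revision_ids=[tip_revision_id], limit=100)
    #     inter.fetch(fetch_spec=search)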

    def _present_source_revisions_for(self, revision_ids, if_present_ids=None):
        """Return the set of all revisions in the ancestry of revision_ids
        that are present in the source repo.

        :param revision_ids: if None, all revisions in source are returned.
        :param if_present_ids: like revision_ids, but if any/all of these are
            absent no error is raised.
        """
        if revision_ids is not None or if_present_ids is not None:
            # First, ensure all specified revisions exist.  Callers expect
            # NoSuchRevision when they pass absent revision_ids here.
            if revision_ids is None:
                revision_ids = set()
            if if_present_ids is None:
                if_present_ids = set()
            revision_ids = set(revision_ids)
            if_present_ids = set(if_present_ids)
            all_wanted_ids = revision_ids.union(if_present_ids)
            graph = self.source.get_graph()
            present_revs = set(graph.get_parent_map(all_wanted_ids))
            missing = revision_ids.difference(present_revs)
            if missing:
                raise errors.NoSuchRevision(self.source, missing.pop())
            found_ids = all_wanted_ids.intersection(present_revs)
            source_ids = [rev_id for (rev_id, parents) in
                          graph.iter_ancestry(found_ids)
                          if rev_id != _mod_revision.NULL_REVISION
                          and parents is not None]
        else:
            source_ids = self.source.all_revision_ids()
        return set(source_ids)

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @classmethod
    def is_compatible(cls, source, target):
        # The default implementation is compatible with everything
        return (source._format.supports_full_versioned_files and
                target._format.supports_full_versioned_files)


class InterDifferingSerializer(InterVersionedFileRepository):

    @classmethod
    def _get_repo_format_to_test(self):
        return None

    @staticmethod
    def is_compatible(source, target):
        if not source._format.supports_full_versioned_files:
            return False
        if not target._format.supports_full_versioned_files:
            return False
        # This is redundant with format.check_conversion_target(), however that
        # raises an exception, and we just want to say "False" as in we won't
        # support converting between these formats.
        if 'IDS_never' in debug.debug_flags:
            return False
        if source.supports_rich_root() and not target.supports_rich_root():
            return False
        if (source._format.supports_tree_reference
            and not target._format.supports_tree_reference):
            return False
        if target._fallback_repositories and target._format.supports_chks:
            # IDS doesn't know how to copy CHKs for the parent inventories it
            # adds to stacked repos.
            return False
        if 'IDS_always' in debug.debug_flags:
            return True
        # Only use this code path for local source and target.  IDS does far
        # too much IO (both bandwidth and roundtrips) over a network.
        if not source.bzrdir.transport.base.startswith('file:///'):
            return False
        if not target.bzrdir.transport.base.startswith('file:///'):
            return False
        return True
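
    # A hedged note (assuming bzr's standard -D debug-flag mechanism): the
    # 'IDS_never' and 'IDS_always' flags checked above can be toggled from
    # the command line when debugging cross-format fetches, e.g.
    #
    #     bzr -DIDS_always pull ../other-branch
    #
    # to force this InterDifferingSerializer path on even for remote
    # transports.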

    def _get_trees(self, revision_ids, cache):
        possible_trees = []
        for rev_id in revision_ids:
            if rev_id in cache:
                possible_trees.append((rev_id, cache[rev_id]))
            else:
                # Not cached, but inventory might be present anyway.
                try:
                    tree = self.source.revision_tree(rev_id)
                except errors.NoSuchRevision:
                    # Nope, parent is ghost.
                    pass
                else:
                    cache[rev_id] = tree
                    possible_trees.append((rev_id, tree))
        return possible_trees

    def _get_delta_for_revision(self, tree, parent_ids, possible_trees):
        """Get the best delta and base for this revision.

        :return: (basis_id, delta)
        """
        deltas = []
        # Generate deltas against each tree, to find the shortest.
        # FIXME: Support nested trees
        texts_possibly_new_in_tree = set()
        for basis_id, basis_tree in possible_trees:
            delta = tree.root_inventory._make_delta(basis_tree.root_inventory)
            for old_path, new_path, file_id, new_entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev, so we don't
                    # care about it.
                    continue
                if not new_path:
                    # Rich roots are handled elsewhere...
                    continue
                kind = new_entry.kind
                if kind != 'directory' and kind != 'file':
                    # No text record associated with this inventory entry.
                    continue
                # This is a directory or file that has changed somehow.
                texts_possibly_new_in_tree.add((file_id, new_entry.revision))
            deltas.append((len(delta), basis_id, delta))
        deltas.sort()
        return deltas[0][1:]
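
    # A hedged illustration of the selection above (P1 and P2 are
    # hypothetical basis ids): if basis P1 yields a three-entry delta and
    # basis P2 a one-entry delta, deltas sorts to
    # [(1, 'P2', d2), (3, 'P1', d1)] and ('P2', d2) is returned, i.e. the
    # basis giving the shortest delta wins.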

    def _fetch_parent_invs_for_stacking(self, parent_map, cache):
        """Find all parent revisions that are absent, but for which the
        inventory is present, and copy those inventories.

        This is necessary to preserve correctness when the source is stacked
        without fallbacks configured.  (Note that in cases like upgrade the
        source may not have _fallback_repositories even though it is
        stacked.)
        """
        parent_revs = set()
        for parents in parent_map.values():
            parent_revs.update(parents)
        present_parents = self.source.get_parent_map(parent_revs)
        absent_parents = set(parent_revs).difference(present_parents)
        parent_invs_keys_for_stacking = self.source.inventories.get_parent_map(
            (rev_id,) for rev_id in absent_parents)
        parent_inv_ids = [key[-1] for key in parent_invs_keys_for_stacking]
        for parent_tree in self.source.revision_trees(parent_inv_ids):
            current_revision_id = parent_tree.get_revision_id()
            parents_parents_keys = parent_invs_keys_for_stacking[
                (current_revision_id,)]
            parents_parents = [key[-1] for key in parents_parents_keys]
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
            delta = parent_tree.root_inventory._make_delta(
                basis_tree.root_inventory)
            self.target.add_inventory_by_delta(
                basis_id, delta, current_revision_id, parents_parents)
            cache[current_revision_id] = parent_tree

    def _fetch_batch(self, revision_ids, basis_id, cache):
        """Fetch across a few revisions.

        :param revision_ids: The revisions to copy
        :param basis_id: The revision_id of a tree that must be in cache, used
            as a basis for delta when no other base is available
        :param cache: A cache of RevisionTrees that we can use.
        :return: The revision_id of the last converted tree. The RevisionTree
            for it will be in cache
        """
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        root_keys_to_create = set()
        text_keys = set()
        pending_deltas = []
        pending_revisions = []
        parent_map = self.source.get_parent_map(revision_ids)
        self._fetch_parent_invs_for_stacking(parent_map, cache)
        self.source._safe_to_return_from_cache = True
        for tree in self.source.revision_trees(revision_ids):
            # Find an inventory delta for this revision.
            # Find text entries that need to be copied, too.
            current_revision_id = tree.get_revision_id()
            parent_ids = parent_map.get(current_revision_id, ())
            parent_trees = self._get_trees(parent_ids, cache)
            possible_trees = list(parent_trees)
            if len(possible_trees) == 0:
                # There either aren't any parents, or the parents are ghosts,
                # so just use the last converted tree.
                possible_trees.append((basis_id, cache[basis_id]))
            basis_id, delta = self._get_delta_for_revision(tree, parent_ids,
                                                           possible_trees)
            revision = self.source.get_revision(current_revision_id)
            pending_deltas.append((basis_id, delta,
                current_revision_id, revision.parent_ids))
            if self._converting_to_rich_root:
                self._revision_id_to_root_id[current_revision_id] = \
                    tree.get_root_id()
            # Determine which texts are present in this revision but not in
            # any of the available parents.
            texts_possibly_new_in_tree = set()
            for old_path, new_path, file_id, entry in delta:
                if new_path is None:
                    # This file_id isn't present in the new rev
                    continue
                if not new_path:
                    # This is the root
                    if not self.target.supports_rich_root():
                        # The target doesn't support rich root, so we don't
                        # copy
                        continue
                    if self._converting_to_rich_root:
                        # This can't be copied normally, we have to insert
                        # it specially
                        root_keys_to_create.add((file_id, entry.revision))
                        continue
                kind = entry.kind
                texts_possibly_new_in_tree.add((file_id, entry.revision))
            for basis_id, basis_tree in possible_trees:
                basis_inv = basis_tree.root_inventory
                for file_key in list(texts_possibly_new_in_tree):
                    file_id, file_revision = file_key
                    try:
                        entry = basis_inv[file_id]
                    except errors.NoSuchId:
                        continue
                    if entry.revision == file_revision:
                        texts_possibly_new_in_tree.remove(file_key)
            text_keys.update(texts_possibly_new_in_tree)
            pending_revisions.append(revision)
            cache[current_revision_id] = tree
            basis_id = current_revision_id
        self.source._safe_to_return_from_cache = False
        # Copy file texts
        from_texts = self.source.texts
        to_texts = self.target.texts
        if root_keys_to_create:
            root_stream = _mod_fetch._new_root_data_stream(
                root_keys_to_create, self._revision_id_to_root_id, parent_map,
                self.source)
            to_texts.insert_record_stream(root_stream)
        to_texts.insert_record_stream(from_texts.get_record_stream(
            text_keys, self.target._format._fetch_order,
            not self.target._format._fetch_uses_deltas))
        # insert inventory deltas
        for delta in pending_deltas:
            self.target.add_inventory_by_delta(*delta)
        if self.target._fallback_repositories:
            # Make sure this stacked repository has all the parent inventories
            # for the new revisions that we are about to insert.  We do this
            # before adding the revisions so that no revision is added until
            # all the inventories it may depend on are added.
            # Note that this is overzealous, as we may have fetched these in an
            # earlier batch.
            parent_ids = set()
            revision_ids = set()
            for revision in pending_revisions:
                revision_ids.add(revision.revision_id)
                parent_ids.update(revision.parent_ids)
            parent_ids.difference_update(revision_ids)
            parent_ids.discard(_mod_revision.NULL_REVISION)
            parent_map = self.source.get_parent_map(parent_ids)
            # we iterate over parent_map and not parent_ids because we don't
            # want to try copying any revision which is a ghost
            for parent_tree in self.source.revision_trees(parent_map):
                current_revision_id = parent_tree.get_revision_id()
                parents_parents = parent_map[current_revision_id]
                possible_trees = self._get_trees(parents_parents, cache)
                if len(possible_trees) == 0:
                    # There either aren't any parents, or the parents are
                    # ghosts, so just use the last converted tree.
                    possible_trees.append((basis_id, cache[basis_id]))
                basis_id, delta = self._get_delta_for_revision(parent_tree,
                    parents_parents, possible_trees)
                self.target.add_inventory_by_delta(
                    basis_id, delta, current_revision_id, parents_parents)
        # insert signatures and revisions
        for revision in pending_revisions:
            try:
                signature = self.source.get_signature_text(
                    revision.revision_id)
                self.target.add_signature_text(revision.revision_id,
                    signature)
            except errors.NoSuchRevision:
                pass
            self.target.add_revision(revision.revision_id, revision)
        return basis_id

    def _fetch_all_revisions(self, revision_ids, pb):
        """Fetch everything for the list of revisions.

        :param revision_ids: The list of revisions to fetch. Must be in
            topological order.
        :param pb: A ProgressTask
        :return: None
        """
        basis_id, basis_tree = self._get_basis(revision_ids[0])
        batch_size = 100
        cache = lru_cache.LRUCache(100)
        cache[basis_id] = basis_tree
        del basis_tree # We don't want to hang on to it here
        hints = []
        a_graph = None

        for offset in range(0, len(revision_ids), batch_size):
            self.target.start_write_group()
            try:
                pb.update(gettext('Transferring revisions'), offset,
                          len(revision_ids))
                batch = revision_ids[offset:offset+batch_size]
                basis_id = self._fetch_batch(batch, basis_id, cache)
            except:
                self.source._safe_to_return_from_cache = False
                self.target.abort_write_group()
                raise
            else:
                hint = self.target.commit_write_group()
                if hint:
                    hints.extend(hint)
        if hints and self.target._format.pack_compresses:
            self.target.pack(hint=hints)
        pb.update(gettext('Transferring revisions'), len(revision_ids),
                  len(revision_ids))

    @needs_write_lock
    def fetch(self, revision_id=None, find_ghosts=False,
            fetch_spec=None):
        """See InterRepository.fetch()."""
        if fetch_spec is not None:
            revision_ids = fetch_spec.get_keys()
        else:
            revision_ids = None
        if self.source._format.experimental:
            ui.ui_factory.show_user_warning('experimental_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        if (not self.source.supports_rich_root()
            and self.target.supports_rich_root()):
            self._converting_to_rich_root = True
            self._revision_id_to_root_id = {}
        else:
            self._converting_to_rich_root = False
        # See <https://launchpad.net/bugs/456077> asking for a warning here
        if self.source._format.network_name() != self.target._format.network_name():
            ui.ui_factory.show_user_warning('cross_format_fetch',
                from_format=self.source._format,
                to_format=self.target._format)
        if revision_ids is None:
            if revision_id:
                search_revision_ids = [revision_id]
            else:
                search_revision_ids = None
            revision_ids = self.target.search_missing_revision_ids(self.source,
                revision_ids=search_revision_ids,
                find_ghosts=find_ghosts).get_keys()
        if not revision_ids:
            return 0, 0
        revision_ids = tsort.topo_sort(
            self.source.get_graph().get_parent_map(revision_ids))
        if not revision_ids:
            return 0, 0
        # Walk through all revisions; get inventory deltas, copy referenced
        # texts that delta references, insert the delta, revision and
        # signature.
        pb = ui.ui_factory.nested_progress_bar()
        try:
            self._fetch_all_revisions(revision_ids, pb)
        finally:
            pb.finished()
        return len(revision_ids), 0
3060
3061
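
    # A minimal usage sketch for fetch() (hedged: ``source_repo`` and
    # ``target_repo`` are placeholder Repository objects and ``rev_id`` a
    # revision id present in the source; this implementation returns a
    # (revision_count, failures) pair):
    #
    #   inter = InterRepository.get(source_repo, target_repo)
    #   copied, failed = inter.fetch(revision_id=rev_id, find_ghosts=False)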

    def _get_basis(self, first_revision_id):
        """Get a revision and tree which exists in the target.

        This assumes that first_revision_id is selected for transmission
        because all other ancestors are already present. If we can't find an
        ancestor we fall back to NULL_REVISION since we know that is safe.

        :return: (basis_id, basis_tree)
        """
        first_rev = self.source.get_revision(first_revision_id)
        try:
            basis_id = first_rev.parent_ids[0]
            # only valid as a basis if the target has it
            self.target.get_revision(basis_id)
            # Try to get a basis tree - if it's a ghost it will hit the
            # NoSuchRevision case.
            basis_tree = self.source.revision_tree(basis_id)
        except (IndexError, errors.NoSuchRevision):
            basis_id = _mod_revision.NULL_REVISION
            basis_tree = self.source.revision_tree(basis_id)
        return basis_id, basis_tree
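
    # Illustration of the fallback above (hedged; ``repo`` is a placeholder
    # repository): NULL_REVISION always exists and names the empty tree, so
    # when no usable parent is found the first delta is computed from
    # scratch.
    #
    #   empty_tree = repo.revision_tree(_mod_revision.NULL_REVISION)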


class InterSameDataRepository(InterVersionedFileRepository):
    """Code for converting between repositories that represent the same data.

    Data format and model must match for this to work.
    """

    @classmethod
    def _get_repo_format_to_test(self):
        """Repository format for testing with.

        InterSameData can pull from subtree to subtree and from non-subtree to
        non-subtree, so we test this with the richest repository format.
        """
        from bzrlib.repofmt import knitrepo
        return knitrepo.RepositoryFormatKnit3()

    @staticmethod
    def is_compatible(source, target):
        return (
            InterRepository._same_model(source, target) and
            source._format.supports_full_versioned_files and
            target._format.supports_full_versioned_files)


InterRepository.register_optimiser(InterVersionedFileRepository)
InterRepository.register_optimiser(InterDifferingSerializer)
InterRepository.register_optimiser(InterSameDataRepository)
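
# A hedged sketch of how the optimisers registered above are selected:
# InterRepository.get(source, target) walks the registered classes and
# returns an instance of the first one whose is_compatible() accepts the
# pair (``repo_a`` and ``repo_b`` are illustrative placeholders):
#
#   inter = InterRepository.get(repo_a, repo_b)
#   # e.g. an InterSameDataRepository when both sides share the same model
#   # and support full versioned files.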


def install_revisions(repository, iterable, num_revisions=None, pb=None):
    """Install all revision data into a repository.

    Accepts an iterable of revision, tree, signature tuples.  The signature
    may be None.
    """
    repository.start_write_group()
    try:
        inventory_cache = lru_cache.LRUCache(10)
        for n, (revision, revision_tree, signature) in enumerate(iterable):
            _install_revision(repository, revision, revision_tree, signature,
                inventory_cache)
            if pb is not None:
                pb.update(gettext('Transferring revisions'), n + 1, num_revisions)
    except:
        repository.abort_write_group()
        raise
    else:
        repository.commit_write_group()
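
# A minimal usage sketch for install_revisions() (hedged: ``repo`` must
# already be write-locked, and ``source_repo``/``revs`` are illustrative
# placeholders for another repository and some of its Revision objects):
#
#   triples = [(rev, source_repo.revision_tree(rev.revision_id), None)
#              for rev in revs]
#   install_revisions(repo, triples, num_revisions=len(revs))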


def _install_revision(repository, rev, revision_tree, signature,
    inventory_cache):
    """Install the data for a single revision into a repository."""
    present_parents = []
    parent_trees = {}
    for p_id in rev.parent_ids:
        if repository.has_revision(p_id):
            present_parents.append(p_id)
            parent_trees[p_id] = repository.revision_tree(p_id)
        else:
            parent_trees[p_id] = repository.revision_tree(
                                     _mod_revision.NULL_REVISION)

    # FIXME: Support nested trees
    inv = revision_tree.root_inventory
    entries = inv.iter_entries()
    # backwards compatibility hack: skip the root id.
    if not repository.supports_rich_root():
        path, root = entries.next()
        if root.revision != rev.revision_id:
            raise errors.IncompatibleRevision(repr(repository))
    text_keys = {}
    for path, ie in entries:
        text_keys[(ie.file_id, ie.revision)] = ie
    text_parent_map = repository.texts.get_parent_map(text_keys)
    missing_texts = set(text_keys) - set(text_parent_map)
    # Add the texts that are not already present
    for text_key in missing_texts:
        ie = text_keys[text_key]
        text_parents = []
        # FIXME: TODO: The following loop overlaps/duplicates that done by
        # commit to determine parents. There is a latent/real bug here where
        # the parents inserted are not those commit would do - in particular
        # they are not filtered by heads(). RBC, AB
        for revision, tree in parent_trees.iteritems():
            if not tree.has_id(ie.file_id):
                continue
            parent_id = tree.get_file_revision(ie.file_id)
            if parent_id in text_parents:
                continue
            text_parents.append((ie.file_id, parent_id))
        lines = revision_tree.get_file(ie.file_id).readlines()
        repository.texts.add_lines(text_key, text_parents, lines)
    try:
        # install the inventory
        if repository._format._commit_inv_deltas and len(rev.parent_ids):
            # Cache this inventory
            inventory_cache[rev.revision_id] = inv
            try:
                basis_inv = inventory_cache[rev.parent_ids[0]]
            except KeyError:
                repository.add_inventory(rev.revision_id, inv, present_parents)
            else:
                delta = inv._make_delta(basis_inv)
                repository.add_inventory_by_delta(rev.parent_ids[0], delta,
                    rev.revision_id, present_parents)
        else:
            repository.add_inventory(rev.revision_id, inv, present_parents)
    except errors.RevisionAlreadyPresent:
        pass
    if signature is not None:
        repository.add_signature_text(rev.revision_id, signature)
    repository.add_revision(rev.revision_id, rev, inv)
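
# Note on the inventory path above: when the repository format supports
# committing inventory deltas and the basis inventory is still in the LRU
# cache, add_inventory_by_delta() stores only the changes against the first
# parent; otherwise the full inventory is serialised via add_inventory().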


def install_revision(repository, rev, revision_tree):
    """Install the data for a single revision (without a signature) into a
    repository."""
    install_revisions(repository, [(rev, revision_tree, None)])
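
# A hedged end-to-end sketch for install_revision() (``repo`` is a
# placeholder write-locked repository, ``other`` a repository that already
# contains ``rev_id``):
#
#   rev = other.get_revision(rev_id)
#   tree = other.revision_tree(rev_id)
#   install_revision(repo, rev, tree)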