/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar


Viewing changes to breezy/bundle/serializer/v4.py

  • Committer: Jelmer Vernooij
  • Date: 2019-06-02 02:35:46 UTC
  • mfrom: (7309 work)
  • mto: This revision was merged to the branch mainline in revision 7319.
  • Revision ID: jelmer@jelmer.uk-20190602023546-lqco868tnv26d8ow
merge trunk.

@@ -26,6 +26,7 @@
     lru_cache,
     multiparent,
     osutils,
+    repository as _mod_repository,
     revision as _mod_revision,
     trace,
     ui,
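The only functional change in this hunk is the new `repository as _mod_repository` import; it is used further down in this diff, where the hand-rolled write-group handling is replaced by the `_mod_repository.WriteGroup` context manager (see the sketch after that hunk).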
@@ -48,7 +49,7 @@
 
     def __init__(self, repo, inventory_keys):
         super(_MPDiffInventoryGenerator, self).__init__(repo.inventories,
-            inventory_keys)
+                                                        inventory_keys)
         self.repo = repo
         self.sha1s = {}
 
@@ -139,7 +140,7 @@
         metadata = {b'parents': parents,
                     b'storage_kind': b'mpdiff'}
         self._add_record(bytes, {b'parents': parents,
-            b'storage_kind': b'fulltext'}, repo_kind, revision_id, None)
+                                 b'storage_kind': b'fulltext'}, repo_kind, revision_id, None)
 
     def add_info_record(self, kwargs):
         """Add an info record to the bundle
@@ -154,7 +155,7 @@
     def encode_name(content_kind, revision_id, file_id=None):
         """Encode semantic ids as a container name"""
         if content_kind not in ('revision', 'file', 'inventory', 'signature',
-                'info'):
+                                'info'):
             raise ValueError(content_kind)
         if content_kind == 'file':
             if file_id is None:
@@ -333,7 +334,7 @@
         """Write bundle records for all revisions of all files"""
         text_keys = []
         altered_fileids = self.repository.fileids_altered_by_revision_ids(
-                self.revision_ids)
+            self.revision_ids)
         for file_id, revision_ids in viewitems(altered_fileids):
             for revision_id in revision_ids:
                 text_keys.append((file_id, revision_id))
@@ -343,7 +344,7 @@
         """Write bundle records for all revisions and signatures"""
         inv_vf = self.repository.inventories
         topological_order = [key[-1] for key in multiparent.topo_iter_keys(
-                                inv_vf, self.revision_keys)]
+            inv_vf, self.revision_keys)]
         revision_order = topological_order
         if self.target is not None and self.target in self.revision_ids:
             # Make sure the target revision is always the last entry
@@ -389,11 +390,11 @@
             parents = parent_map.get(revision_id, None)
             revision_text = revision_to_bytes(revision)
             self.bundle.add_fulltext_record(revision_text, parents,
-                                       'revision', revision_id)
+                                            'revision', revision_id)
             try:
                 self.bundle.add_fulltext_record(
                     self.repository.get_signature_text(
-                    revision_id), parents, 'signature', revision_id)
+                        revision_id), parents, 'signature', revision_id)
             except errors.NoSuchRevision:
                 pass
 
@@ -434,6 +435,7 @@
 class BundleInfoV4(object):
 
     """Provide (most of) the BundleInfo interface"""
+
     def __init__(self, fileobj, serializer):
         self._fileobj = fileobj
         self._serializer = serializer
@@ -478,7 +480,7 @@
             self.__real_revisions = []
             bundle_reader = self.get_bundle_reader()
             for bytes, metadata, repo_kind, revision_id, file_id in \
-                bundle_reader.iter_records():
+                    bundle_reader.iter_records():
                 if repo_kind == 'info':
                     serializer =\
                         self._serializer.get_source_serializer(metadata)
@@ -518,14 +520,8 @@
 
         Must be called with the Repository locked.
         """
-        self._repository.start_write_group()
-        try:
-            result = self._install_in_write_group()
-        except:
-            self._repository.abort_write_group()
-            raise
-        self._repository.commit_write_group()
-        return result
+        with _mod_repository.WriteGroup(self._repository):
+            return self._install_in_write_group()
 
     def _install_in_write_group(self):
         current_file = None
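The hunk above is the one substantive change in this diff: the manual start/commit/abort-on-error sequence is replaced by the `_mod_repository.WriteGroup` context manager imported at the top of the file. A minimal sketch of such a context manager, equivalent to the block being removed, could look like the following; the real `breezy.repository.WriteGroup` may differ in detail (for example in how it handles abort failures or extra options).

# Sketch only: a context manager equivalent to the removed try/except block.
class WriteGroup(object):
    """Start a write group on entry; commit on success, abort on error."""

    def __init__(self, repository):
        self.repository = repository

    def __enter__(self):
        self.repository.start_write_group()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.repository.commit_write_group()
        else:
            self.repository.abort_write_group()
        return False  # never suppress the original exception

With that in place, `with _mod_repository.WriteGroup(self._repository): return self._install_in_write_group()` behaves like the removed lines while guaranteeing that the abort path runs on any exception.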
@@ -536,24 +532,25 @@
         added_inv = set()
         target_revision = None
         for bytes, metadata, repo_kind, revision_id, file_id in\
-            self._container.iter_records():
+                self._container.iter_records():
             if repo_kind == 'info':
                 if self._info is not None:
                     raise AssertionError()
                 self._handle_info(metadata)
             if (pending_file_records and
-                (repo_kind, file_id) != ('file', current_file)):
+                    (repo_kind, file_id) != ('file', current_file)):
                 # Flush the data for a single file - prevents memory
                 # spiking due to buffering all files in memory.
                 self._install_mp_records_keys(self._repository.texts,
-                    pending_file_records)
+                                              pending_file_records)
                 current_file = None
                 del pending_file_records[:]
             if len(pending_inventory_records) > 0 and repo_kind != 'inventory':
                 self._install_inventory_records(pending_inventory_records)
                 pending_inventory_records = []
             if repo_kind == 'inventory':
-                pending_inventory_records.append(((revision_id,), metadata, bytes))
+                pending_inventory_records.append(
+                    ((revision_id,), metadata, bytes))
             if repo_kind == 'revision':
                 target_revision = revision_id
                 self._install_revision(revision_id, metadata, bytes)
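The `# Flush the data for a single file` comment above captures the reason for the bookkeeping around `pending_file_records`: records are buffered per file and written out as soon as a record for a different file (or a non-file record) arrives, so only one file's records are ever held in memory. A generic sketch of that flush-on-key-change pattern (names here are illustrative, not breezy API):

# Illustrative only: the buffering/flush pattern used by
# _install_in_write_group, reduced to its essentials.
def install_grouped(records, flush):
    """records yields (key, record) pairs grouped by key; flush writes a batch."""
    pending = []
    current_key = None
    for key, record in records:
        if pending and key != current_key:
            # A new key has started: write out the previous group before
            # buffering anything else, keeping memory use bounded.
            flush(current_key, pending)
            pending = []
        current_key = key
        pending.append(record)
    if pending:
        # Final flush, mirroring the trailing _install_mp_records_keys call.
        flush(current_key, pending)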
@@ -561,8 +558,10 @@
                 self._install_signature(revision_id, metadata, bytes)
             if repo_kind == 'file':
                 current_file = file_id
-                pending_file_records.append(((file_id, revision_id), metadata, bytes))
-        self._install_mp_records_keys(self._repository.texts, pending_file_records)
+                pending_file_records.append(
+                    ((file_id, revision_id), metadata, bytes))
+        self._install_mp_records_keys(
+            self._repository.texts, pending_file_records)
         return target_revision
 
     def _handle_info(self, info):
@@ -570,7 +569,7 @@
         self._info = info
         self._source_serializer = self._serializer.get_source_serializer(info)
         if (info[b'supports_rich_root'] == 0 and
-            self._repository.supports_rich_root()):
+                self._repository.supports_rich_root()):
             self.update_root = True
         else:
             self.update_root = False
@@ -619,7 +618,7 @@
             # installed yet.)
             parent_keys = [(r,) for r in remaining_parent_ids]
             present_parent_map = self._repository.inventories.get_parent_map(
-                                        parent_keys)
+                parent_keys)
             present_parent_ids = []
             ghosts = set()
             for p_id in remaining_parent_ids:
@@ -629,7 +628,7 @@
                     ghosts.add(p_id)
             to_string = self._source_serializer.write_inventory_to_string
             for parent_inv in self._repository.iter_inventories(
-                                    present_parent_ids):
+                    present_parent_ids):
                 p_text = to_string(parent_inv)
                 inventory_cache[parent_inv.revision_id] = parent_inv
                 cached_parent_texts[parent_inv.revision_id] = p_text
@@ -637,19 +636,19 @@
 
         parent_texts = [cached_parent_texts[parent_id]
                         for parent_id in parent_ids
-                         if parent_id not in ghosts]
+                        if parent_id not in ghosts]
         return parent_texts
 
     def _install_inventory_records(self, records):
         if (self._info[b'serializer'] == self._repository._serializer.format_num
-            and self._repository._serializer.support_altered_by_hack):
+                and self._repository._serializer.support_altered_by_hack):
             return self._install_mp_records_keys(self._repository.inventories,
-                records)
+                                                 records)
         # Use a 10MB text cache, since these are string xml inventories. Note
         # that 10MB is fairly small for large projects (a single inventory can
         # be >5MB). Another possibility is to cache 10-20 inventory texts
         # instead
-        inventory_text_cache = lru_cache.LRUSizeCache(10*1024*1024)
+        inventory_text_cache = lru_cache.LRUSizeCache(10 * 1024 * 1024)
         # Also cache the in-memory representation. This allows us to create
         # inventory deltas to apply rather than calling add_inventory from
        # scratch each time.
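The comment above explains why a byte-sized cache is used rather than a count-based one: inventory XML texts vary from a few KB to several MB, so the budget is expressed as roughly 10MB of text. A short usage sketch, assuming the dict-style access `lru_cache.LRUSizeCache` has historically provided (treat the exact API as an assumption):

# Sketch, not taken from this file: a size-bounded cache of inventory texts.
from breezy import lru_cache

inventory_text_cache = lru_cache.LRUSizeCache(10 * 1024 * 1024)  # ~10MB of text
inventory_text_cache[b'revision-id-1'] = b'<inventory ...>'  # assumed dict-style set
if b'revision-id-1' in inventory_text_cache:
    # Oldest entries are evicted once the byte budget is exceeded.
    xml_text = inventory_text_cache[b'revision-id-1']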
@@ -670,7 +669,7 @@
                 # it would have to cast to a list of lines, which we get back
                 # as lines and then cast back to a string.
                 target_lines = multiparent.MultiParent.from_patch(bytes
-                            ).to_lines(p_texts)
+                                                                  ).to_lines(p_texts)
                 inv_text = b''.join(target_lines)
                 del target_lines
                 sha1 = osutils.sha_string(inv_text)
@@ -691,7 +690,7 @@
                     else:
                         delta = target_inv._make_delta(parent_inv)
                         self._repository.add_inventory_by_delta(parent_ids[0],
-                            delta, revision_id, parent_ids)
+                                                                delta, revision_id, parent_ids)
                 except errors.UnsupportedInventoryKind:
                     raise errors.IncompatibleRevision(repr(self._repository))
                 inventory_cache[revision_id] = target_inv
@@ -701,7 +700,7 @@
         if self.update_root:
             text_key = (target_inv.root.file_id, revision_id)
             parent_keys = [(target_inv.root.file_id, parent) for
-                parent in parent_ids]
+                           parent in parent_ids]
             self._repository.texts.add_lines(text_key, parent_keys, [])
         elif not self._repository.supports_rich_root():
             if target_inv.root.revision != revision_id: