/brz/remove-bazaar

To get this branch, use:
bzr branch http://gegoxaren.bato24.eu/bzr/brz/remove-bazaar

« back to all changes in this revision

Viewing changes to bzrlib/smart/repository.py

  • Committer: Richard Wilbur
  • Date: 2016-02-04 19:07:28 UTC
  • mto: This revision was merged to the branch mainline in revision 6618.
  • Revision ID: richard.wilbur@gmail.com-20160204190728-p0zvfii6zase0fw7
Update COPYING.txt from the original http://www.gnu.org/licenses/gpl-2.0.txt  (Only differences were in whitespace.)  Thanks to Petr Stodulka for pointing out the discrepancy.

Show diffs side-by-side

added

removed

Lines of Context:
19
19
from __future__ import absolute_import
20
20
 
21
21
import bz2
22
 
import itertools
23
22
import os
24
 
try:
25
 
    import queue
26
 
except ImportError:
27
 
    import Queue as queue
 
23
import Queue
28
24
import sys
29
25
import tempfile
30
26
import threading
31
27
import zlib
32
28
 
33
 
from ... import (
 
29
from bzrlib import (
34
30
    bencode,
35
31
    errors,
36
32
    estimate_compressed_size,
 
33
    inventory as _mod_inventory,
 
34
    inventory_delta,
37
35
    osutils,
 
36
    pack,
38
37
    trace,
39
38
    ui,
40
 
    )
41
 
from .. import (
42
 
    inventory as _mod_inventory,
43
 
    inventory_delta,
44
 
    pack,
45
39
    vf_search,
46
40
    )
47
 
from ..bzrdir import BzrDir
48
 
from ...sixish import (
49
 
    reraise,
50
 
)
51
 
from .request import (
 
41
from bzrlib.bzrdir import BzrDir
 
42
from bzrlib.smart.request import (
52
43
    FailedSmartServerResponse,
53
44
    SmartServerRequest,
54
45
    SuccessfulSmartServerResponse,
55
46
    )
56
 
from ...repository import _strip_NULL_ghosts, network_format_registry
57
 
from ... import revision as _mod_revision
58
 
from ..versionedfile import (
 
47
from bzrlib.repository import _strip_NULL_ghosts, network_format_registry
 
48
from bzrlib import revision as _mod_revision
 
49
from bzrlib.versionedfile import (
59
50
    ChunkedContentFactory,
60
51
    NetworkRecordStream,
61
52
    record_to_fulltext_bytes,
123
114
        start_keys = set(lines[0].split(' '))
124
115
        exclude_keys = set(lines[1].split(' '))
125
116
        revision_count = int(lines[2])
126
 
        with repository.lock_read():
 
117
        repository.lock_read()
 
118
        try:
127
119
            search = repository.get_graph()._make_breadth_first_searcher(
128
120
                start_keys)
129
121
            while True:
130
122
                try:
131
 
                    next_revs = next(search)
 
123
                    next_revs = search.next()
132
124
                except StopIteration:
133
125
                    break
134
126
                search.stop_searching_any(exclude_keys.intersection(next_revs))
143
135
            search_result = vf_search.SearchResult(started_keys, excludes,
144
136
                len(included_keys), included_keys)
145
137
            return (search_result, None)
 
138
        finally:
 
139
            repository.unlock()
146
140
 
147
141
 
148
142
class SmartServerRepositoryReadLocked(SmartServerRepositoryRequest):
150
144
 
151
145
    def do_repository_request(self, repository, *args):
152
146
        """Read lock a repository for do_readlocked_repository_request."""
153
 
        with repository.lock_read():
 
147
        repository.lock_read()
 
148
        try:
154
149
            return self.do_readlocked_repository_request(repository, *args)
 
150
        finally:
 
151
            repository.unlock()
155
152
 
156
153
 
157
154
class SmartServerRepositoryBreakLock(SmartServerRepositoryRequest):
196
193
            compressed.
197
194
        """
198
195
        repository = self._repository
199
 
        with repository.lock_read():
 
196
        repository.lock_read()
 
197
        try:
200
198
            return self._do_repository_request(body_bytes)
 
199
        finally:
 
200
            repository.unlock()
201
201
 
202
202
    def _expand_requested_revs(self, repo_graph, revision_ids, client_seen_revs,
203
203
                               include_missing, max_size=65536):
301
301
        else:
302
302
            search_ids = repository.all_revision_ids()
303
303
        search = graph._make_breadth_first_searcher(search_ids)
304
 
        transitive_ids = set(itertools.chain.from_iterable(search))
 
304
        transitive_ids = set()
 
305
        map(transitive_ids.update, list(search))
305
306
        parent_map = graph.get_parent_map(transitive_ids)
306
307
        revision_graph = _strip_NULL_ghosts(parent_map)
307
308
        if revision_id and revision_id not in revision_graph:
326
327
        """
327
328
        try:
328
329
            found_flag, result = repository.get_rev_id_for_revno(revno, known_pair)
329
 
        except errors.RevisionNotPresent as err:
 
330
        except errors.RevisionNotPresent, err:
330
331
            if err.revision_id != known_pair[1]:
331
332
                raise AssertionError(
332
333
                    'get_rev_id_for_revno raised RevisionNotPresent for '
427
428
            return FailedSmartServerResponse(('nosuchrevision', revid))
428
429
 
429
430
        body = ''
430
 
        if 'committers' in stats:
 
431
        if stats.has_key('committers'):
431
432
            body += 'committers: %d\n' % stats['committers']
432
 
        if 'firstrev' in stats:
 
433
        if stats.has_key('firstrev'):
433
434
            body += 'firstrev: %.3f %d\n' % stats['firstrev']
434
 
        if 'latestrev' in stats:
 
435
        if stats.has_key('latestrev'):
435
436
             body += 'latestrev: %.3f %d\n' % stats['latestrev']
436
 
        if 'revisions' in stats:
 
437
        if stats.has_key('revisions'):
437
438
            body += 'revisions: %d\n' % stats['revisions']
438
 
        if 'size' in stats:
 
439
        if stats.has_key('size'):
439
440
            body += 'size: %d\n' % stats['size']
440
441
 
441
442
        return SuccessfulSmartServerResponse(('ok', ), body)
457
458
        """
458
459
        try:
459
460
            text = repository.get_signature_text(revision_id)
460
 
        except errors.NoSuchRevision as err:
 
461
        except errors.NoSuchRevision, err:
461
462
            return FailedSmartServerResponse(
462
463
                ('nosuchrevision', err.revision))
463
464
        return SuccessfulSmartServerResponse(('ok', ), text)
503
504
            token = None
504
505
        try:
505
506
            token = repository.lock_write(token=token).repository_token
506
 
        except errors.LockContention as e:
 
507
        except errors.LockContention, e:
507
508
            return FailedSmartServerResponse(('LockContention',))
508
509
        except errors.UnlockableTransport:
509
510
            return FailedSmartServerResponse(('UnlockableTransport',))
510
 
        except errors.LockFailed as e:
 
511
        except errors.LockFailed, e:
511
512
            return FailedSmartServerResponse(('LockFailed',
512
513
                str(e.lock), str(e.why)))
513
514
        if token is not None:
583
584
            source = repository._get_source(self._to_format)
584
585
            stream = source.get_stream(search_result)
585
586
        except Exception:
 
587
            exc_info = sys.exc_info()
586
588
            try:
587
589
                # On non-error, unlocking is done by the body stream handler.
588
590
                repository.unlock()
589
591
            finally:
590
 
                raise
 
592
                raise exc_info[0], exc_info[1], exc_info[2]
591
593
        return SuccessfulSmartServerResponse(('ok',),
592
594
            body_stream=self.body_stream(stream, repository))
593
595
 
596
598
        try:
597
599
            for bytes in byte_stream:
598
600
                yield bytes
599
 
        except errors.RevisionNotPresent as e:
 
601
        except errors.RevisionNotPresent, e:
600
602
            # This shouldn't be able to happen, but as we don't buffer
601
603
            # everything it can in theory happen.
602
604
            repository.unlock()
732
734
                yield record
733
735
 
734
736
        self.seed_state()
735
 
        with ui.ui_factory.nested_progress_bar() as pb:
736
 
            rc = self._record_counter
737
 
            try:
738
 
                # Make and consume sub generators, one per substream type:
739
 
                while self.first_bytes is not None:
740
 
                    substream = NetworkRecordStream(self.iter_substream_bytes())
741
 
                    # after substream is fully consumed, self.current_type is set
742
 
                    # to the next type, and self.first_bytes is set to the matching
743
 
                    # bytes.
744
 
                    yield self.current_type, wrap_and_count(pb, rc, substream)
745
 
            finally:
746
 
                if rc:
747
 
                    pb.update('Done', rc.max, rc.max)
 
737
        pb = ui.ui_factory.nested_progress_bar()
 
738
        rc = self._record_counter
 
739
        try:
 
740
            # Make and consume sub generators, one per substream type:
 
741
            while self.first_bytes is not None:
 
742
                substream = NetworkRecordStream(self.iter_substream_bytes())
 
743
                # after substream is fully consumed, self.current_type is set
 
744
                # to the next type, and self.first_bytes is set to the matching
 
745
                # bytes.
 
746
                yield self.current_type, wrap_and_count(pb, rc, substream)
 
747
        finally:
 
748
            if rc:
 
749
                pb.update('Done', rc.max, rc.max)
 
750
            pb.finished()
748
751
 
749
752
    def seed_state(self):
750
753
        """Prepare the _ByteStreamDecoder to decode from the pack stream."""
775
778
    def do_repository_request(self, repository, token):
776
779
        try:
777
780
            repository.lock_write(token=token)
778
 
        except errors.TokenMismatch as e:
 
781
        except errors.TokenMismatch, e:
779
782
            return FailedSmartServerResponse(('TokenMismatch',))
780
783
        repository.dont_leave_lock_in_place()
781
784
        repository.unlock()
828
831
 
829
832
    def _copy_to_tempdir(self, from_repo):
830
833
        tmp_dirname = osutils.mkdtemp(prefix='tmpbzrclone')
831
 
        tmp_bzrdir = from_repo.controldir._format.initialize(tmp_dirname)
 
834
        tmp_bzrdir = from_repo.bzrdir._format.initialize(tmp_dirname)
832
835
        tmp_repo = from_repo._format.initialize(tmp_bzrdir)
833
836
        from_repo.copy_content_into(tmp_repo)
834
837
        return tmp_dirname, tmp_repo
835
838
 
836
839
    def _tarfile_response(self, tmp_dirname, compression):
837
 
        with tempfile.NamedTemporaryFile() as temp:
 
840
        temp = tempfile.NamedTemporaryFile()
 
841
        try:
838
842
            self._tarball_of_dir(tmp_dirname, compression, temp.file)
839
843
            # all finished; write the tempfile out to the network
840
844
            temp.seek(0)
841
845
            return SuccessfulSmartServerResponse(('ok',), temp.read())
842
846
            # FIXME: Don't read the whole thing into memory here; rather stream
843
847
            # it out from the file onto the network. mbp 20070411
 
848
        finally:
 
849
            temp.close()
844
850
 
845
851
    def _tarball_of_dir(self, dirname, compression, ofile):
846
852
        import tarfile
881
887
        tokens = [token for token in resume_tokens.split(' ') if token]
882
888
        self.tokens = tokens
883
889
        self.repository = repository
884
 
        self.queue = queue.Queue()
 
890
        self.queue = Queue.Queue()
885
891
        self.insert_thread = threading.Thread(target=self._inserter_thread)
886
892
        self.insert_thread.start()
887
893
 
912
918
        if self.insert_thread is not None:
913
919
            self.insert_thread.join()
914
920
        if not self.insert_ok:
915
 
            try:
916
 
                reraise(*self.insert_exception)
917
 
            finally:
918
 
                del self.insert_exception
 
921
            exc_info = self.insert_exception
 
922
            raise exc_info[0], exc_info[1], exc_info[2]
919
923
        write_group_tokens, missing_keys = self.insert_result
920
924
        if write_group_tokens or missing_keys:
921
925
            # bzip needed? missing keys should typically be a small set.
1036
1040
        try:
1037
1041
            try:
1038
1042
                repository.resume_write_group(write_group_tokens)
1039
 
            except errors.UnresumableWriteGroup as e:
 
1043
            except errors.UnresumableWriteGroup, e:
1040
1044
                return FailedSmartServerResponse(
1041
1045
                    ('UnresumableWriteGroup', e.write_groups, e.reason))
1042
1046
            try:
1063
1067
        try:
1064
1068
            try:
1065
1069
                repository.resume_write_group(write_group_tokens)
1066
 
            except errors.UnresumableWriteGroup as e:
 
1070
            except errors.UnresumableWriteGroup, e:
1067
1071
                return FailedSmartServerResponse(
1068
1072
                    ('UnresumableWriteGroup', e.write_groups, e.reason))
1069
1073
                repository.abort_write_group()
1084
1088
        try:
1085
1089
            try:
1086
1090
                repository.resume_write_group(write_group_tokens)
1087
 
            except errors.UnresumableWriteGroup as e:
 
1091
            except errors.UnresumableWriteGroup, e:
1088
1092
                return FailedSmartServerResponse(
1089
1093
                    ('UnresumableWriteGroup', e.write_groups, e.reason))
1090
1094
            else:
1114
1118
    def do_repository_request(self, repository, lock_token):
1115
1119
        try:
1116
1120
            repository.lock_write(token=lock_token)
1117
 
        except errors.TokenLockingNotSupported as e:
 
1121
        except errors.TokenLockingNotSupported, e:
1118
1122
            return FailedSmartServerResponse(
1119
1123
                ('TokenLockingNotSupported', ))
1120
1124
        try:
1177
1181
    """
1178
1182
 
1179
1183
    def body_stream(self, repository, desired_files):
1180
 
        with self._repository.lock_read():
 
1184
        self._repository.lock_read()
 
1185
        try:
1181
1186
            text_keys = {}
1182
1187
            for i, key in enumerate(desired_files):
1183
1188
                text_keys[key] = i
1198
1203
                data = compressor.flush()
1199
1204
                if data:
1200
1205
                    yield data
 
1206
        finally:
 
1207
            self._repository.unlock()
1201
1208
 
1202
1209
    def do_body(self, body_bytes):
1203
1210
        desired_files = [
1235
1242
            body_stream=self.body_stream(self._repository, revision_ids))
1236
1243
 
1237
1244
    def body_stream(self, repository, revision_ids):
1238
 
        with self._repository.lock_read():
 
1245
        self._repository.lock_read()
 
1246
        try:
1239
1247
            for record in repository.revisions.get_record_stream(
1240
1248
                [(revid,) for revid in revision_ids], 'unordered', True):
1241
1249
                if record.storage_kind == 'absent':
1242
1250
                    continue
1243
1251
                yield zlib.compress(record.get_bytes_as('fulltext'))
 
1252
        finally:
 
1253
            self._repository.unlock()
1244
1254
 
1245
1255
 
1246
1256
class SmartServerRepositoryGetInventories(SmartServerRepositoryRequest):
1263
1273
        serializer = inventory_delta.InventoryDeltaSerializer(
1264
1274
            repository.supports_rich_root(),
1265
1275
            repository._format.supports_tree_reference)
1266
 
        with repository.lock_read():
 
1276
        repository.lock_read()
 
1277
        try:
1267
1278
            for inv, revid in repository._iter_inventories(revids, ordering):
1268
1279
                if inv is None:
1269
1280
                    continue
1272
1283
                    prev_inv.revision_id, inv.revision_id, inv_delta)
1273
1284
                yield ChunkedContentFactory(inv.revision_id, None, None, lines)
1274
1285
                prev_inv = inv
 
1286
        finally:
 
1287
            repository.unlock()
1275
1288
 
1276
1289
    def body_stream(self, repository, ordering, revids):
1277
1290
        substream = self._inventory_delta_stream(repository,