class _SFTPReadvHelper(object):
    """A class to help with managing the state of a readv request."""

    # See _get_requests for an explanation.
    _max_request_size = 32768

    def __init__(self, original_offsets, relpath, _report_activity):
        """Create a new readv helper.

        :param original_offsets: The original requests given by the caller of
            readv()
        :param relpath: The name of the file (if known)
        :param _report_activity: A Transport._report_activity bound method,
            to be called as data arrives.
        """
        self.original_offsets = list(original_offsets)
        self.relpath = relpath
        self._report_activity = _report_activity

    def _get_requests(self):
        """Break up the offsets into individual requests over sftp.

        The SFTP spec only requires implementers to support 32kB requests. We
        could try something larger (openssh supports 64kB), but then we have
        to handle requests that fail.
        So instead, we just break up our maximum chunks into 32kB chunks, and
        asynchronously request them.
        Newer versions of paramiko would do the chunking for us, but we want
        to start processing results right away, so we do it ourselves.
        """
        # TODO: Because we issue async requests, we don't 'fudge' any extra
        #       data. I'm not 100% sure that is the best choice.

        # The first thing we do is to collapse the individual requests as much
        # as possible, so we don't issue requests <32kB
        sorted_offsets = sorted(self.original_offsets)
        coalesced = list(ConnectedTransport._coalesce_offsets(sorted_offsets,
                                                              limit=0,
                                                              fudge_factor=0))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # Break this up into 32kB requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size
        if 'sftp' in debug.debug_flags:
            mutter('SFTP.readv(%s) %s offsets => %s coalesced => %s requests',
                   self.relpath, len(sorted_offsets), len(coalesced),
                   len(requests))
        return requests
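
    # A worked example (not from bzrlib): with the 32768-byte cap above, a
    # hypothetical coalesced range of 70000 bytes at offset 0 becomes three
    # requests:
    #     (0, 32768), (32768, 32768), (65536, 4464)
    # i.e. full-sized chunks followed by the remainder (70000 - 2*32768).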
    def request_and_yield_offsets(self, fp):
        """Request the data from the remote machine, yielding the results.

        :param fp: A Paramiko SFTPFile object that supports readv.
        :return: Yield the data requested by the original readv caller, one by
            one.
        """
        requests = self._get_requests()
        offset_iter = iter(self.original_offsets)
        cur_offset, cur_size = offset_iter.next()
        # paramiko .readv() yields strings that are in the order of the
        # requests. So we track the current request to know where the next
        # data is being returned from.
        input_start = None
        last_end = None
        buffered_data = []
        buffered_len = 0

        # This is used to buffer chunks which we couldn't process yet.
        # It is (start, data) tuples.
        data_chunks = []
        # Create an 'unlimited' data stream, so we stop based on requests,
        # rather than just because the data stream ended. This lets us detect
        # short readv.
        data_stream = itertools.chain(fp.readv(requests),
                                      itertools.repeat(None))
        for (start, length), data in itertools.izip(requests, data_stream):
            if data is None:
                # The data stream ran out before all requests were satisfied.
                raise errors.ShortReadvError(self.relpath,
                    start, length, 0)
            if len(data) != length:
                raise errors.ShortReadvError(self.relpath,
                    start, length, len(data))
            self._report_activity(length, 'read')
            if last_end is None:
                # This is the first request, just buffer it
                buffered_data = [data]
                buffered_len = length
                input_start = start
            elif start == last_end:
                # The data we are reading fits neatly on the previous
                # buffer, so this is all part of a larger coalesced range.
                buffered_data.append(data)
                buffered_len += length
            else:
                # We have an 'interrupt' in the data stream. So we know we are
                # at a request boundary.
                if buffered_len > 0:
                    # We haven't consumed the buffer so far, so put it into
                    # data_chunks, and continue.
                    buffered = ''.join(buffered_data)
                    data_chunks.append((input_start, buffered))
                input_start = start
                buffered_data = [data]
                buffered_len = length
            last_end = start + length
            if input_start == cur_offset and cur_size <= buffered_len:
                # Simplify the next steps a bit by transforming buffered_data
                # into a single string. We also have the nice property that
                # when there is only one string ''.join([x]) == x, so there is
                # no data copying.
                buffered = ''.join(buffered_data)
                # Clean out buffered data so that we keep memory
                # consumption low
                del buffered_data[:]
                buffered_offset = 0
                # TODO: We *could* also consider the case where cur_offset is
                #       in the buffered range, even though it doesn't *start*
                #       the buffered range. But for packs we pretty much
                #       always read in order, so you won't get any extra data
                #       in the middle.
                while (input_start == cur_offset
                       and (buffered_offset + cur_size) <= buffered_len):
                    # We've buffered enough data to process this request,
                    # spit it out
                    cur_data = buffered[buffered_offset:buffered_offset + cur_size]
                    # move the direct pointer into our buffered data
                    buffered_offset += cur_size
                    # Move the start-of-buffer pointer
                    input_start += cur_size
                    # Yield the requested data
                    yield cur_offset, cur_data
                    cur_offset, cur_size = offset_iter.next()
                # at this point, we've consumed as much of buffered as we can,
                # so break off the portion that we consumed
                if buffered_offset == buffered_len:
                    # No tail to leave behind
                    buffered_data = []
                    buffered_len = 0
                else:
                    buffered = buffered[buffered_offset:]
                    buffered_data = [buffered]
                    buffered_len = len(buffered)
        if buffered_len:
            # Whatever is left in the buffer belongs to a request we could
            # not satisfy in order; stash it for the out-of-order pass below.
            buffered = ''.join(buffered_data)
            del buffered_data[:]
            data_chunks.append((input_start, buffered))
        if data_chunks:
            if 'sftp' in debug.debug_flags:
                mutter('SFTP readv left with %d out-of-order bytes',
                       sum(map(lambda x: len(x[1]), data_chunks)))
            # We've processed all the readv data, at this point, anything we
            # couldn't process is in data_chunks. This doesn't happen often,
            # so this code path isn't optimized.
            # We use an interesting process for data_chunks.
            # Specifically if we have "bisect_left([(start, len, entries)],
            #                                      (qstart,))"
            # If start == qstart, then we get the specific node. Otherwise we
            # get the previous node.
            while True:
                idx = bisect.bisect_left(data_chunks, (cur_offset,))
                if idx < len(data_chunks) and data_chunks[idx][0] == cur_offset:
                    # The data starts here
                    data = data_chunks[idx][1][:cur_size]
                elif idx > 0:
                    # The data is in a portion of a previous page
                    idx -= 1
                    sub_offset = cur_offset - data_chunks[idx][0]
                    data = data_chunks[idx][1]
                    data = data[sub_offset:sub_offset + cur_size]
                else:
                    # We are missing the page where the data should be found,
                    # which means it was never read; fail the size check below.
                    data = ''
                if len(data) != cur_size:
                    raise AssertionError('We must have miscalculated.'
                        ' We expected %d bytes, but only found %d'
                        % (cur_size, len(data)))
                yield cur_offset, data
                cur_offset, cur_size = offset_iter.next()

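# A minimal usage sketch (not part of bzrlib): driving _SFTPReadvHelper with
# a hypothetical stand-in for a paramiko SFTPFile. Like paramiko's, the fake
# readv() yields the requested byte ranges in request order. The names
# _FakeSFTPFile and _demo_readv_helper are illustrative only.
def _demo_readv_helper():
    class _FakeSFTPFile(object):
        def __init__(self, data):
            self._data = data
        def readv(self, requests):
            for start, length in requests:
                yield self._data[start:start + length]

    data = ''.join(chr(i % 256) for i in range(100000))
    offsets = [(0, 10), (500, 20), (40000, 100)]
    helper = _SFTPReadvHelper(offsets, 'demo-file',
                              lambda length, direction: None)
    got = []
    for offset, bytes in helper.request_and_yield_offsets(_FakeSFTPFile(data)):
        # Each yielded chunk matches the caller's original (offset, size)
        assert bytes == data[offset:offset + len(bytes)]
        got.append((offset, len(bytes)))
    assert got == offsets
    return got

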
class SFTPUrlHandling(Transport):
    """Mix-in that does common handling of SSH/SFTP URLs."""

    def __init__(self, base):
        self._parse_url(base)
        base = self._unparse_url(self._path)
        super(SFTPUrlHandling, self).__init__(base)

    def _parse_url(self, url):
        (self._scheme,
         self._username, self._password,
         self._host, self._port, self._path) = self._split_url(url)

    def _unparse_url(self, path):
        """Return a URL for a path relative to this transport.
        """
        path = urllib.quote(path)
        # handle homedir paths
        if not path.startswith('/'):
            path = "/~/" + path
        netloc = urllib.quote(self._host)
        if self._username is not None:
            netloc = '%s@%s' % (urllib.quote(self._username), netloc)
        if self._port is not None:
            netloc = '%s:%d' % (netloc, self._port)
        return urlparse.urlunparse((self._scheme, netloc, path, '', '', ''))

    def _split_url(self, url):
        (scheme, username, password, host, port, path) = split_url(url)
        ## assert scheme == 'sftp'

        # the initial slash should be removed from the path, and treated
        # as a homedir relative path (the path begins with a double slash
        # if it is absolute).
        # see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
        # RBC 20060118 we are not using this as its too user hostile. instead
        # we are following lftp and using /~/foo to mean '~/foo'.
        # handle homedir paths
        if path.startswith('/~/'):
            path = path[3:]
        elif path == '/~':
            path = ''
        return (scheme, username, password, host, port, path)

    def abspath(self, relpath):
        """Return the full url to the given relative path.

        @param relpath: the relative path or path components
        @type relpath: str or list
        """
        return self._unparse_url(self._remote_path(relpath))

    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        :param relpath: is a urlencoded string.
        """
        return self._combine_paths(self._path, relpath)
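

# A minimal sketch (not part of bzrlib) of the /~/ convention described in
# _split_url above: following lftp, '/~/foo' means a homedir-relative 'foo',
# while absolute paths keep their leading slash.
def _demo_homedir_path(path):
    if path.startswith('/~/'):
        return path[3:]      # homedir relative
    elif path == '/~':
        return ''            # the homedir itself
    return path              # absolute path
# _demo_homedir_path('/~/foo') == 'foo'
# _demo_homedir_path('/abs/foo') == '/abs/foo'

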
class SFTPTransport(ConnectedTransport):
    """Transport implementation for SFTP access."""

    _do_prefetch = _default_do_prefetch
    # The SFTP spec only requires servers to honour 32kB requests; newer
    # paramiko versions would break up the request itself, rather than us
    # having to worry about it (see _SFTPReadvHelper._get_requests).
    _max_request_size = 32768

    def __init__(self, base, _from_transport=None):
        super(SFTPTransport, self).__init__(base,
                                            _from_transport=_from_transport)

    def should_cache(self):
        """
        Return True if the data pulled across should be cached locally.
        """
        return True

    def clone(self, offset=None):
        """
        Return a new SFTPTransport with root at self.base + offset.

        We share the same SFTP session between such transports, because it's
        fairly expensive to set them up.
        """
        if offset is None:
            return SFTPTransport(self.base, _from_transport=self)
        else:
            return SFTPTransport(self.abspath(offset), _from_transport=self)

    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        :param relpath: is a urlencoded string.
        """
        relative = urlutils.unescape(relpath).encode('utf-8')
        remote_path = self._combine_paths(self._path, relative)
        # the initial slash should be removed from the path, and treated as a
        # homedir relative path (the path begins with a double slash if it is
        # absolute). see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
        # RBC 20060118 we are not using this as its too user hostile. instead
        # we are following lftp and using /~/foo to mean '~/foo'
        # vila--20070602 and leave absolute paths begin with a single slash.
        if remote_path.startswith('/~/'):
            remote_path = remote_path[3:]
        elif remote_path == '/~':
            remote_path = ''
        return remote_path

    def _create_connection(self, credentials=None):
        """Create a new connection with the provided credentials.

        :param credentials: The credentials needed to establish the connection.

        :return: The created connection and its associated credentials.

        The credentials are only the password as it may have been entered
        interactively by the user and may be different from the one provided
        in the base url at transport creation time.
        """
        if credentials is None:
            password = self._password
        else:
            password = credentials

        vendor = ssh._get_ssh_vendor()
        user = self._user
        if user is None:
            auth = config.AuthenticationConfig()
            user = auth.get_user('ssh', self._host, self._port)
        connection = vendor.connect_sftp(self._user, password,
                                         self._host, self._port)
        return connection, (user, password)

    def _get_sftp(self):
        """Ensures that a connection is established"""
        connection = self._get_connection()
        if connection is None:
            # First connection ever
            connection, credentials = self._create_connection()
            self._set_connection(connection, credentials)
        return connection

    def relpath(self, abspath):
        scheme, username, password, host, port, path = self._split_url(abspath)
        error = []
        if (username != self._username):
            error.append('username mismatch')
        if (host != self._host):
            error.append('host mismatch')
        if (port != self._port):
            error.append('port mismatch')
        if (not path.startswith(self._path)):
            error.append('path mismatch')
        if error:
            extra = ': ' + ', '.join(error)
            raise PathNotChild(abspath, self.base, extra=extra)
        pl = len(self._path)
        return path[pl:].strip('/')

    def has(self, relpath):
        """
        Does the target location exist?
        """
        try:
            self._get_sftp().stat(self._remote_path(relpath))
            # stat result is about 20 bytes, let's say
            self._report_activity(20, 'read')
            return True
        except IOError:
            return False

    def get(self, relpath):
        """Get the file at the given relative path.

        :param relpath: The relative path to the file
        """
        try:
            # FIXME: by returning the file directly, we don't pass this
            # through to report_activity. We could try wrapping the object
            # before it's returned. For readv and get_bytes it's handled in
            # the higher-level function.
            path = self._remote_path(relpath)
            f = self._get_sftp().file(path, mode='rb')
            if self._do_prefetch and (getattr(f, 'prefetch', None) is not None):
                f.prefetch()
            return f
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving',
                failure_exc=errors.ReadError)

    def get_bytes(self, relpath):
        # reimplement this here so that we can report how many bytes came back
        f = self.get(relpath)
        try:
            bytes = f.read()
            self._report_activity(len(bytes), 'read')
            return bytes
        finally:
            f.close()

    def _readv(self, relpath, offsets):
        """See Transport.readv()"""
        # We overload the default readv() because we want to use a file
        # that does not have prefetch enabled.
        try:
            path = self._remote_path(relpath)
            fp = self._get_sftp().file(path, mode='rb')
            readv = getattr(fp, 'readv', None)
            if readv:
                return self._sftp_readv(fp, offsets, relpath)
            if 'sftp' in debug.debug_flags:
                mutter('seek and read %s offsets', len(offsets))
            return self._seek_and_read(fp, offsets, relpath)
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving')

    def recommended_page_size(self):
        """See Transport.recommended_page_size().

        For SFTP we suggest a large page size to reduce the overhead
        introduced by latency.
        """
        return 64 * 1024
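
    # A worked example (not from bzrlib) of why a large page size pays off:
    # at a hypothetical 100ms round trip, reading 1MB in 64kB pages costs 16
    # round trips (~1.6s of latency), while 4kB pages would cost 256 round
    # trips (~25.6s), before even counting transfer time.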

    def _sftp_readv(self, fp, offsets, relpath):
        """Use the readv() member of fp to do async readv.

        And then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants.
        """
        helper = _SFTPReadvHelper(offsets, relpath, self._report_activity)
        return helper.request_and_yield_offsets(fp)

    def put_file(self, relpath, f, mode=None):
        """
        Copy the file-like object into the location.

        :param relpath: Location to put the contents, relative to base.
        :param f:       File-like object.
        :param mode: The final mode for the file
        """
        final_path = self._remote_path(relpath)
        return self._put(final_path, f, mode=mode)


# ------------- server test implementation --------------

from bzrlib.tests.stub_sftp import StubServer, StubSFTPServer

STUB_SERVER_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
-----END RSA PRIVATE KEY-----
"""


class SocketListener(threading.Thread):

    def __init__(self, callback):
        threading.Thread.__init__(self)
        self._callback = callback
        self._socket = socket.socket()
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(('localhost', 0))
        self._socket.listen(1)
        self.port = self._socket.getsockname()[1]
        self._stop_event = threading.Event()

    def stop(self):
        # called from outside this thread
        self._stop_event.set()
        # use a timeout here, because if the test fails, the server thread may
        # never notice the stop_event.
        self.join(5.0)
        self._socket.close()

    def run(self):
        while True:
            readable, writable_unused, exception_unused = \
                select.select([self._socket], [], [], 0.1)
            if self._stop_event.isSet():
                return
            if len(readable) == 0:
                continue
            try:
                s, addr_unused = self._socket.accept()
                # because the loopback socket is inline, and transports are
                # never explicitly closed, best to launch a new thread.
                threading.Thread(target=self._callback, args=(s,)).start()
            except socket.error, x:
                sys.excepthook(*sys.exc_info())
                warning('Socket error during accept() within unit test server'
                        ' thread: %r' % x)
            except Exception, x:
                # probably a failed test; unit test thread will log the
                # failure/error
                sys.excepthook(*sys.exc_info())
                warning('Exception from within unit test server thread: %r' %
                        x)
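

# A minimal usage sketch (not part of bzrlib): SocketListener runs the
# callback on a fresh thread for each accepted connection, and stop() is
# called from another thread. The callback and function name here are
# hypothetical.
def _demo_socket_listener():
    import socket as _socket

    def _callback(sock):
        sock.sendall('hello')
        sock.close()

    listener = SocketListener(_callback)
    listener.setDaemon(True)
    listener.start()
    client = _socket.socket()
    client.connect(('localhost', listener.port))
    data = client.recv(5)    # 'hello'
    client.close()
    listener.stop()
    return data
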

class SocketDelay(object):
    """A socket decorator to make TCP appear slower.

    This changes recv, send, and sendall to add a fixed latency to each python
    call if a new roundtrip is detected. That is, when a recv is called and the
    flag new_roundtrip is set, latency is charged. Every send and send_all
    transmission clears the flag.

    In addition every send, sendall and recv sleeps a bit per character sent
    to simulate bandwidth.

    Not all methods are implemented, this is deliberate as this class is not a
    replacement for the builtin sockets layer. fileno is not implemented to
    prevent the proxy being bypassed.
    """

    simulated_time = 0
    _proxied_arguments = dict.fromkeys([
        "close", "getpeername", "getsockname", "getsockopt", "gettimeout",
        "setblocking", "setsockopt", "settimeout", "shutdown"])

    def __init__(self, sock, latency, bandwidth=1.0,
                 really_sleep=True):
        """
        :param bandwidth: simulated bandwidth (MegaBit)
        :param really_sleep: If set to false, the SocketDelay will just
            increase a counter, instead of calling time.sleep. This is useful
            for unittesting the SocketDelay.
        """
        self.sock = sock
        self.latency = latency
        self.really_sleep = really_sleep
        self.time_per_byte = 1 / (bandwidth / 8.0 * 1024 * 1024)
        self.new_roundtrip = False

    def sleep(self, s):
        if self.really_sleep:
            time.sleep(s)
        else:
            SocketDelay.simulated_time += s

    def __getattr__(self, attr):
        if attr in SocketDelay._proxied_arguments:
            return getattr(self.sock, attr)
        raise AttributeError("'SocketDelay' object has no attribute %r" %
                             attr)

    def dup(self):
        # Copy time_per_byte directly, since __init__ only accepts the
        # bandwidth it is derived from.
        new = SocketDelay(self.sock.dup(), self.latency,
                          really_sleep=self.really_sleep)
        new.time_per_byte = self.time_per_byte
        return new

    def recv(self, *args):
        data = self.sock.recv(*args)
        if data and self.new_roundtrip:
            self.new_roundtrip = False
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return data

    def sendall(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return self.sock.sendall(data, flags)

    def send(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        bytes_sent = self.sock.send(data, flags)
        self.sleep(bytes_sent * self.time_per_byte)
        return bytes_sent
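

# A minimal sketch (not part of bzrlib) of the really_sleep=False mode the
# docstring mentions: latency is charged once per roundtrip into the
# simulated_time counter instead of via time.sleep. socketpair() is
# Unix-only, and the numbers here are hypothetical.
def _demo_socket_delay():
    import socket as _socket
    a, b = _socket.socketpair()
    try:
        before = SocketDelay.simulated_time
        delayed = SocketDelay(a, latency=0.1, bandwidth=1.0,
                              really_sleep=False)
        delayed.sendall('x' * 1024)   # first send of a roundtrip pays latency
        data = b.recv(1024)
        # 0.1s of latency plus 1024 bytes at the simulated bandwidth
        elapsed = SocketDelay.simulated_time - before
        assert elapsed >= 0.1
    finally:
        a.close()
        b.close()
    return data
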

class SFTPServer(Server):
    """Common code for SFTP server facilities."""

    def __init__(self):
        self._original_vendor = None
        self._homedir = None
        self._server_homedir = None
        self._listener = None
        self._root = None
        self._vendor = ssh.ParamikoVendor()
        # sftp server logs
        self.logs = []
        self.add_latency = 0

    def _get_sftp_url(self, path):
        """Calculate an sftp url to this server for path."""
        return 'sftp://foo:bar@localhost:%d/%s' % (self._listener.port, path)

    def log(self, message):
        """StubServer uses this to log when a new server is created."""
        self.logs.append(message)

    def _run_server_entry(self, sock):
        """Entry point for all implementations of _run_server.

        If self.add_latency is > 0.000001 then sock is given a latency adding
        decorator.
        """
        if self.add_latency > 0.000001:
            sock = SocketDelay(sock, self.add_latency)
        return self._run_server(sock)

    def _run_server(self, s):
        ssh_server = paramiko.Transport(s)
        key_file = pathjoin(self._homedir, 'test_rsa.key')
        f = open(key_file, 'w')
        f.write(STUB_SERVER_KEY)
        f.close()
        host_key = paramiko.RSAKey.from_private_key_file(key_file)
        ssh_server.add_server_key(host_key)
        server = StubServer(self)
        ssh_server.set_subsystem_handler('sftp', paramiko.SFTPServer,
                                         StubSFTPServer, root=self._root,
                                         home=self._server_homedir)
        event = threading.Event()
        ssh_server.start_server(event, server)
        event.wait(5.0)

    def setUp(self):
        self._original_vendor = ssh._ssh_vendor
        ssh._ssh_vendor = self._vendor
        if sys.platform == 'win32':
            # Win32 needs to use the UNICODE api
            self._homedir = getcwd()
        else:
            # But Linux SFTP servers should just deal in bytestreams
            self._homedir = os.getcwd()
        if self._server_homedir is None:
            self._server_homedir = self._homedir
        self._root = '/'
        if sys.platform == 'win32':
            self._root = ''
        self._listener = SocketListener(self._run_server_entry)
        self._listener.setDaemon(True)
        self._listener.start()

    def tearDown(self):
        """See bzrlib.transport.Server.tearDown."""
        self._listener.stop()
        ssh._ssh_vendor = self._original_vendor

    def get_bogus_url(self):
        """See bzrlib.transport.Server.get_bogus_url."""
        # this is chosen to try to prevent trouble with proxies, weird dns,
        # etc. we bind a random socket, so that we get a guaranteed unused
        # port. we just never listen on that port
        s = socket.socket()
        s.bind(('localhost', 0))
        return 'sftp://%s:%s/' % s.getsockname()


class SFTPFullAbsoluteServer(SFTPServer):
    """A test server for sftp transports, using absolute urls and ssh."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        return self._get_sftp_url(urlutils.escape(self._homedir[1:]))


class SFTPServerWithoutSSH(SFTPServer):
    """An SFTP server that uses a simple TCP socket pair rather than SSH."""

    def __init__(self):
        super(SFTPServerWithoutSSH, self).__init__()
        self._vendor = ssh.LoopbackVendor()

    def _run_server(self, sock):
        # Re-import these as locals, so that they're still accessible during
        # interpreter shutdown (when all module globals get set to None,
        # leading to confusing errors like "'NoneType' object has no
        # attribute 'error'").
        class FakeChannel(object):
            def get_transport(self):
                return self
            def get_log_channel(self):
                return 'paramiko'
            def get_name(self):
                return '1'
            def get_hexdump(self):
                return False
            def close(self):
                pass

        server = paramiko.SFTPServer(FakeChannel(), 'sftp', StubServer(self),
                                     StubSFTPServer, root=self._root,
                                     home=self._server_homedir)
        try:
            server.start_subsystem('sftp', None, sock)
        except socket.error, e:
            if (len(e.args) > 0) and (e.args[0] == errno.EPIPE):
                # it's okay for the client to disconnect abruptly
                # (bug in paramiko 1.6: it should absorb this exception)
                pass
            else:
                raise
        except Exception, e:
            import sys; sys.stderr.write('\nEXCEPTION %r\n\n' % e.__class__)
        server.finish_subsystem()


class SFTPAbsoluteServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using absolute urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        if sys.platform == 'win32':
            return self._get_sftp_url(urlutils.escape(self._homedir))
        else:
            return self._get_sftp_url(urlutils.escape(self._homedir[1:]))


class SFTPHomeDirServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using homedir relative urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        return self._get_sftp_url("~/")


class SFTPSiblingAbsoluteServer(SFTPAbsoluteServer):
    """A test server for sftp transports, using absolute urls to non-home."""

    def setUp(self):
        self._server_homedir = '/dev/noone/runs/tests/here'
        super(SFTPSiblingAbsoluteServer, self).setUp()


def _sftp_connect(host, port, username, password):
    """Connect to the remote sftp server.

    :raises: a TransportError 'could not connect'.

    :returns: a paramiko.sftp_client.SFTPClient

    TODO: Raise a more reasonable ConnectionFailed exception
    """
    idx = (host, port, username)
    try:
        return _connected_hosts[idx]
    except KeyError:
        pass

    sftp = _sftp_connect_uncached(host, port, username, password)
    _connected_hosts[idx] = sftp
    return sftp


def _sftp_connect_uncached(host, port, username, password):
    vendor = ssh._get_ssh_vendor()
    sftp = vendor.connect_sftp(username, password, host, port)
    return sftp
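

# A minimal sketch (not part of bzrlib) of the memoisation in _sftp_connect:
# the cache key is (host, port, username) and deliberately omits the
# password, so a second connect to the same account reuses the existing
# client even if a different password is offered. The fake 'connect' below
# stands in for _sftp_connect_uncached.
def _demo_connection_cache(connect=lambda host, port, user, password: object()):
    cache = {}
    def cached_connect(host, port, username, password):
        idx = (host, port, username)
        if idx not in cache:
            cache[idx] = connect(host, port, username, password)
        return cache[idx]
    a = cached_connect('localhost', 2222, 'foo', 'bar')
    b = cached_connect('localhost', 2222, 'foo', 'other-password')
    return a is b  # True: the second call never reaches connect()
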

def get_test_permutations():
    """Return the permutations to be used in testing."""
    return [(SFTPTransport, SFTPAbsoluteServer),
            (SFTPTransport, SFTPHomeDirServer),
            (SFTPTransport, SFTPSiblingAbsoluteServer),
            ]