class _SFTPReadvHelper(object):
    """A class to help with managing the state of a readv request."""

    # See _get_requests for an explanation.
    _max_request_size = 32768
    def __init__(self, original_offsets, relpath, _report_activity):
        """Create a new readv helper.

        :param original_offsets: The original requests given by the caller of
            readv()
        :param relpath: The name of the file (if known)
        :param _report_activity: A Transport._report_activity bound method,
            to be called as data arrives.
        """
        self.original_offsets = list(original_offsets)
        self.relpath = relpath
        self._report_activity = _report_activity
    def _get_requests(self):
        """Break up the offsets into individual requests over sftp.

        The SFTP spec only requires implementers to support 32kB requests. We
        could try something larger (openssh supports 64kB), but then we have to
        handle requests that fail.
        So instead, we just break up our maximum chunks into 32kB chunks, and
        asynchronously request them.
        Newer versions of paramiko would do the chunking for us, but we want to
        start processing results right away, so we do it ourselves.
        """
        # TODO: Because we issue async requests, we don't 'fudge' any extra
        #       data. I'm not 100% sure that is the best choice.

        # The first thing we do is to collapse the individual requests as much
        # as possible, so we don't issue requests <32kB
        sorted_offsets = sorted(self.original_offsets)
        coalesced = list(ConnectedTransport._coalesce_offsets(sorted_offsets,
                                                        limit=0, fudge_factor=0))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # Break this up into 32kB requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size
        if 'sftp' in debug.debug_flags:
            mutter('SFTP.readv(%s) %s offsets => %s coalesced => %s requests',
                   self.relpath, len(sorted_offsets), len(coalesced),
                   len(requests))
        return requests
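    # An illustrative example of the chunking above (the numbers are ours,
    # not from the original source): offsets [(0, 40000), (40000, 100)]
    # coalesce into a single 40100-byte range starting at 0, which is then
    # split into two requests, (0, 32768) and (32768, 7332).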
    def request_and_yield_offsets(self, fp):
        """Request the data from the remote machine, yielding the results.

        :param fp: A Paramiko SFTPFile object that supports readv.
        :return: Yield the data requested by the original readv caller, one by
            one.
        """
        requests = self._get_requests()
        offset_iter = iter(self.original_offsets)
        cur_offset, cur_size = offset_iter.next()
        # paramiko .readv() yields strings that are in the order of the requests
        # So we track the current request to know where the next data is
        # being returned from.
        input_start = None
        last_end = None
        buffered_data = []
        buffered_len = 0

        # This is used to buffer chunks which we couldn't process yet
        # It is (start, end, data) tuples.
        data_chunks = []
        # Create an 'unlimited' data stream, so we stop based on requests,
        # rather than just because the data stream ended. This lets us detect
        # short reads.
        data_stream = itertools.chain(fp.readv(requests),
                                      itertools.repeat(None))
        for (start, length), data in itertools.izip(requests, data_stream):
            if data is None:
                # The stream ran dry before we satisfied all the requests
                raise errors.ShortReadvError(self.relpath, start, length, 0)
            if len(data) != length:
                raise errors.ShortReadvError(self.relpath,
                    start, length, len(data))
            self._report_activity(length, 'read')
            if last_end is None:
                # This is the first request, just buffer it
                buffered_data = [data]
                buffered_len = length
                input_start = start
            elif start == last_end:
                # The data we are reading fits neatly on the previous
                # buffer, so this is all part of a larger coalesced range.
                buffered_data.append(data)
                buffered_len += length
            else:
                # We have an 'interrupt' in the data stream. So we know we are
                # at a request boundary.
                if buffered_len > 0:
                    # We haven't consumed the buffer so far, so put it into
                    # data_chunks, and continue.
                    buffered = ''.join(buffered_data)
                    data_chunks.append((input_start, buffered))
                input_start = start
                buffered_data = [data]
                buffered_len = length
            last_end = start + length
            if input_start == cur_offset and cur_size <= buffered_len:
                # Simplify the next steps a bit by transforming buffered_data
                # into a single string. We also have the nice property that
                # when there is only one string ''.join([x]) == x, so there is
                # no data copying.
                buffered = ''.join(buffered_data)
                # Clean out buffered data so that we keep memory usage low
                del buffered_data[:]
                buffered_offset = 0
                # TODO: We *could* also consider the case where cur_offset is
                #       in the buffered range, even though it doesn't *start*
                #       the buffered range. But for packs we pretty much always
                #       read in order, so you won't get any extra data in the
                #       middle.
                while (input_start == cur_offset
                       and (buffered_offset + cur_size) <= buffered_len):
                    # We've buffered enough data to process this request,
                    # spit it out
                    cur_data = buffered[buffered_offset:buffered_offset + cur_size]
                    # move the direct pointer into our buffered data
                    buffered_offset += cur_size
                    # Move the start-of-buffer pointer
                    input_start += cur_size
                    # Yield the requested data
                    yield cur_offset, cur_data
                    cur_offset, cur_size = offset_iter.next()
                # at this point, we've consumed as much of buffered as we can,
                # so break off the portion that we consumed
                if buffered_offset == len(buffered):
                    # No tail to leave behind
                    buffered_data = []
                    buffered_len = 0
                else:
                    buffered = buffered[buffered_offset:]
                    buffered_data = [buffered]
                    buffered_len = len(buffered)
        # now that the data stream is done, close the handle
        fp.close()
        if buffered_len:
            buffered = ''.join(buffered_data)
            del buffered_data[:]
            data_chunks.append((input_start, buffered))
        if data_chunks:
            if 'sftp' in debug.debug_flags:
                mutter('SFTP readv left with %d out-of-order bytes',
                       sum(map(lambda x: len(x[1]), data_chunks)))
            # We've processed all the readv data, at this point, anything we
            # couldn't process is in data_chunks. This doesn't happen often, so
            # this code path isn't optimized.
            # We use an interesting process for data_chunks
            # Specifically if we have "bisect_left([(start, len, entries)],
            #                                       (qstart,)])"
            # If start == qstart, then we get the specific node. Otherwise we
            # get the previous node
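            # Worked example (ours, not from the original source): with
            # data_chunks = [(0, 'abcd'), (10, 'wxyz')] and cur_offset = 12,
            # bisect_left returns 2, so we take the previous node: idx
            # becomes 1, sub_offset = 12 - 10 = 2, and the data comes from
            # 'wxyz'[2:2 + cur_size].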
            while True:
                idx = bisect.bisect_left(data_chunks, (cur_offset,))
                if idx < len(data_chunks) and data_chunks[idx][0] == cur_offset:
                    # The data starts here
                    data = data_chunks[idx][1][:cur_size]
                elif idx > 0:
                    # The data is in a portion of a previous page
                    idx -= 1
                    sub_offset = cur_offset - data_chunks[idx][0]
                    data = data_chunks[idx][1]
                    data = data[sub_offset:sub_offset + cur_size]
                else:
                    # We are missing the page where the data should be found,
                    # something is wrong
                    data = ''
                if len(data) != cur_size:
                    raise AssertionError('We must have miscalculated.'
                        ' We expected %d bytes, but only found %d'
                        % (cur_size, len(data)))
                yield cur_offset, data
                cur_offset, cur_size = offset_iter.next()
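
    # A minimal usage sketch (assumed, not from the original source):
    #
    #   helper = _SFTPReadvHelper([(0, 10), (10, 10)], 'a-file',
    #                             transport._report_activity)
    #   for offset, data in helper.request_and_yield_offsets(fp):
    #       process(offset, data)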


class SFTPTransport(ConnectedTransport):
    """Transport implementation for SFTP access."""

    _do_prefetch = _default_do_prefetch

    # Newer paramiko versions can break
    # up the request itself, rather than us having to worry about it
    _max_request_size = 32768
    def __init__(self, base, _from_transport=None):
        super(SFTPTransport, self).__init__(base,
                                            _from_transport=_from_transport)
    def should_cache(self):
        """Return True if the data pulled across should be cached locally."""
        return True

    def clone(self, offset=None):
        """Return a new SFTPTransport with root at self.base + offset.

        We share the same SFTP session between such transports, because it's
        fairly expensive to set them up.
        """
        if offset is None:
            return SFTPTransport(self.base, _from_transport=self)
        else:
            return SFTPTransport(self.abspath(offset), _from_transport=self)
    def abspath(self, relpath):
        """Return the full url to the given relative path.

        @param relpath: the relative path or path components
        @type relpath: str or list
        """
        return self._unparse_url(self._remote_path(relpath))
    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        :param relpath: is a urlencoded string.
        """
        relative = urlutils.unescape(relpath).encode('utf-8')
        remote_path = self._combine_paths(self._path, relative)
        # the initial slash should be removed from the path, and treated as a
        # homedir relative path (the path begins with a double slash if it is
        # absolute). see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
        # RBC 20060118 we are not using this as its too user hostile. instead
        # we are following lftp and using /~/foo to mean '~/foo'
        # vila--20070602 and leave absolute paths begin with a single slash.
        if remote_path.startswith('/~/'):
            remote_path = remote_path[3:]
        elif remote_path == '/~':
            remote_path = ''
        return remote_path
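    # For example (ours, not from the original source): with self._path set
    # to '/~/repo', _remote_path('a%20file') unescapes to 'a file', combines
    # to '/~/repo/a file', and the '/~/' prefix is stripped, giving the
    # homedir-relative path 'repo/a file'.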
    def _create_connection(self, credentials=None):
        """Create a new connection with the provided credentials.

        :param credentials: The credentials needed to establish the connection.

        :return: The created connection and its associated credentials.

        The credentials are only the password as it may have been entered
        interactively by the user and may be different from the one provided
        in base url at transport creation time.
        """
        if credentials is None:
            password = self._password
        else:
            password = credentials

        vendor = ssh._get_ssh_vendor()
        auth = config.AuthenticationConfig()
        user = auth.get_user('ssh', self._host, self._port)
        connection = vendor.connect_sftp(self._user, password,
                                         self._host, self._port)
        return connection, (user, password)
    def _get_sftp(self):
        """Ensures that a connection is established"""
        connection = self._get_connection()
        if connection is None:
            # First connection ever
            connection, credentials = self._create_connection()
            self._set_connection(connection, credentials)
        return connection
    def relpath(self, abspath):
        username, password, host, port, path = self._split_url(abspath)
        error = []
        if (username != self._username):
            error.append('username mismatch')
        if (host != self._host):
            error.append('host mismatch')
        if (port != self._port):
            error.append('port mismatch')
        if (not path.startswith(self._path)):
            error.append('path mismatch')
        if error:
            extra = ': ' + ', '.join(error)
            raise PathNotChild(abspath, self.base, extra=extra)
        pl = len(self._path)
        return path[pl:].strip('/')
    def has(self, relpath):
        """
        Does the target location exist?
        """
        try:
            self._get_sftp().stat(self._remote_path(relpath))
            # stat result is about 20 bytes, let's say
            self._report_activity(20, 'read')
            return True
        except IOError:
            return False
    def get(self, relpath):
        """Get the file at the given relative path.

        :param relpath: The relative path to the file
        """
        try:
            # FIXME: by returning the file directly, we don't pass this
            # through to report_activity. We could try wrapping the object
            # before it's returned. For readv and get_bytes it's handled in
            # the higher-level function.
            path = self._remote_path(relpath)
            f = self._get_sftp().file(path, mode='rb')
            if self._do_prefetch and (getattr(f, 'prefetch', None) is not None):
                f.prefetch()
            return f
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving',
                failure_exc=errors.ReadError)
    def get_bytes(self, relpath):
        # reimplement this here so that we can report how many bytes came back
        f = self.get(relpath)
        try:
            bytes = f.read()
            self._report_activity(len(bytes), 'read')
            return bytes
        finally:
            f.close()
    def _readv(self, relpath, offsets):
        """See Transport.readv()"""
        # We overload the default readv() because we want to use a file
        # that does not have prefetch enabled.
        # Also, if we have a new paramiko, it implements an async readv()
        if not offsets:
            return

        try:
            path = self._remote_path(relpath)
            fp = self._get_sftp().file(path, mode='rb')
            readv = getattr(fp, 'readv', None)
            if readv:
                return self._sftp_readv(fp, offsets, relpath)
            if 'sftp' in debug.debug_flags:
                mutter('seek and read %s offsets', len(offsets))
            return self._seek_and_read(fp, offsets, relpath)
        except (IOError, paramiko.SSHException), e:
            self._translate_io_exception(e, path, ': error retrieving')
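    # An illustrative call (ours, not from the original source):
    #
    #   for offset, data in t._readv('inventory', [(0, 100), (200, 50)]):
    #       ...
    #
    # yields (0, <100 bytes>) and then (200, <50 bytes>).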
    def recommended_page_size(self):
        """See Transport.recommended_page_size().

        For SFTP we suggest a large page size to reduce the overhead
        introduced by latency.
        """
        return 64 * 1024
    def _sftp_readv(self, fp, offsets, relpath):
        """Use the readv() member of fp to do async readv.

        Then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants.
        """
        helper = _SFTPReadvHelper(offsets, relpath, self._report_activity)
        return helper.request_and_yield_offsets(fp)
    def put_file(self, relpath, f, mode=None):
        """Copy the file-like object into the location.

        :param relpath: Location to put the contents, relative to base.
        :param f:       File-like object.
        :param mode: The final mode for the file
        """
        final_path = self._remote_path(relpath)
        return self._put(final_path, f, mode=mode)
    def _put(self, abspath, f, mode=None):
        """Helper function so both put() and copy_abspaths can reuse the code"""
        tmp_abspath = '%s.tmp.%.9f.%d.%d' % (abspath, time.time(),
                        os.getpid(), random.randint(0, 0x7FFFFFFF))
        fout = self._sftp_open_exclusive(tmp_abspath, mode=mode)
        closed = False
        try:
            fout.set_pipelined(True)
            length = self._pump(f, fout)
            fout.close()
            closed = True
            self._rename_and_overwrite(tmp_abspath, abspath)
            return length
        except Exception, e:
            # If we fail, try to clean up the temporary file before we throw
            # the exception, but don't let another exception mess things up.
            try:
                if not closed:
                    fout.close()
                self._get_sftp().remove(tmp_abspath)
            except:
                # raise the saved except
                raise e
            # raise the original with its traceback if we can.
            raise
    def _put_non_atomic_helper(self, relpath, writer, mode=None,
                               create_parent_dir=False,
                               dir_mode=None):
        abspath = self._remote_path(relpath)

        # TODO: jam 20060816 paramiko doesn't publicly expose a way to
        #       set the file mode at create time. If it does, use it.
        #       But for now, we just chmod later anyway.

        def _open_and_write_file():
            """Try to open the target file, raise error on failure"""
            fout = None
            try:
                try:
                    fout = self._get_sftp().file(abspath, mode='wb')
                    fout.set_pipelined(True)
                    writer(fout)
                except (paramiko.SSHException, IOError), e:
                    self._translate_io_exception(e, abspath,
                                                 ': unable to open')

                # This is designed to chmod() right before we close.
                # Because we set_pipelined() earlier, theoretically we might
                # avoid the round trip for fout.close()
                if mode is not None:
                    self._get_sftp().chmod(abspath, mode)
            finally:
                if fout is not None:
                    fout.close()

        if not create_parent_dir:
            _open_and_write_file()
            return

        # Try error handling to create the parent directory if we need to
        try:
            _open_and_write_file()
        except NoSuchFile:
            # Try to create the parent directory, and then go back to
            # writing the file
            parent_dir = os.path.dirname(abspath)
            self._mkdir(parent_dir, dir_mode)
            _open_and_write_file()
    def put_file_non_atomic(self, relpath, f, mode=None,
                            create_parent_dir=False,
                            dir_mode=None):
        """Copy the file-like object into the target location.

        This function is not strictly safe to use. It is only meant to
        be used when you already know that the target does not exist.
        It is not safe, because it will open and truncate the remote
        file. So there may be a time when the file has invalid contents.

        :param relpath: The remote location to put the contents.
        :param f:       File-like object.
        :param mode:    Possible access permissions for new file.
                        None means do not set remote permissions.
        :param create_parent_dir: If we cannot create the target file because
                        the parent directory does not exist, go ahead and
                        create it, and then try again.
        """
        def writer(fout):
            self._pump(f, fout)
        self._put_non_atomic_helper(relpath, writer, mode=mode,
                                    create_parent_dir=create_parent_dir,
                                    dir_mode=dir_mode)

    def put_bytes_non_atomic(self, relpath, bytes, mode=None,
                             create_parent_dir=False,
                             dir_mode=None):
        def writer(fout):
            fout.write(bytes)
        self._put_non_atomic_helper(relpath, writer, mode=mode,
                                    create_parent_dir=create_parent_dir,
                                    dir_mode=dir_mode)
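    # The writer callback keeps the open/chmod/retry logic in one place. An
    # illustrative (assumed) call:
    #
    #   t.put_bytes_non_atomic('foo/bar', 'new contents',
    #                          create_parent_dir=True)
    #
    # retries once, after creating 'foo', if the first open raises NoSuchFile.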
    def iter_files_recursive(self):
        """Walk the relative paths of all files in this transport."""
        # progress is handled by list_dir
        queue = list(self.list_dir('.'))
        while queue:
            relpath = queue.pop(0)
            st = self.stat(relpath)
            if stat.S_ISDIR(st.st_mode):
                for i, basename in enumerate(self.list_dir(relpath)):
                    queue.insert(i, relpath + '/' + basename)
            else:
                yield relpath
    def _mkdir(self, abspath, mode=None):
        if mode is None:
            local_mode = 0777
        else:
            local_mode = mode
        try:
            self._report_activity(len(abspath), 'write')
            self._get_sftp().mkdir(abspath, local_mode)
            self._report_activity(1, 'read')
            if mode is not None:
                # chmod a dir through sftp will erase any sgid bit set
                # on the server side. So, if the bit mode are already
                # set, avoid the chmod. If the mode is not fine but
                # the sgid bit is set, report a warning to the user
                # with the umask fix.
                stat = self._get_sftp().lstat(abspath)
                mode = mode & 0777 # can't set special bits anyway
                if mode != stat.st_mode & 0777:
                    if stat.st_mode & 06000:
                        warning('About to chmod %s over sftp, which will result'
                                ' in its suid or sgid bits being cleared. If'
                                ' you want to preserve those bits, change your '
                                ' environment on the server to use umask 0%03o.'
                                % (abspath, 0777 - mode))
                    self._get_sftp().chmod(abspath, mode=mode)
        except (paramiko.SSHException, IOError), e:
            self._translate_io_exception(e, abspath, ': unable to mkdir',
                failure_exc=FileExists)
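    # Worked example for the warning above (numbers ours): if the caller
    # asks for mode 0775 on a directory whose sgid bit is set, the suggested
    # server-side umask is 0777 - 0775 = 0002.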
    def mkdir(self, relpath, mode=None):
        """Create a directory at the given path."""
        self._mkdir(self._remote_path(relpath), mode=mode)
    def open_write_stream(self, relpath, mode=None):
        """See Transport.open_write_stream."""
        # initialise the file to zero-length
        # this is three round trips, but we don't use this
        # api more than once per write_group at the moment so
        # it is a tolerable overhead. Better would be to truncate
        # the file after opening. RBC 20070805
        self.put_bytes_non_atomic(relpath, "", mode)
        abspath = self._remote_path(relpath)
        # TODO: jam 20060816 paramiko doesn't publicly expose a way to
        #       set the file mode at create time. If it does, use it.
        #       But for now, we just chmod later anyway.
        try:
            handle = self._get_sftp().file(abspath, mode='wb')
            handle.set_pipelined(True)
        except (paramiko.SSHException, IOError), e:
            self._translate_io_exception(e, abspath,
                                         ': unable to open')
        _file_streams[self.abspath(relpath)] = handle
        return FileFileStream(self, relpath, handle)
    def _translate_io_exception(self, e, path, more_info='',
                                failure_exc=PathError):
        """Translate a paramiko or IOError into a friendlier exception."""
    def _sftp_open_exclusive(self, abspath, mode=None):
        """Open a remote path exclusively.

        :param abspath: The remote absolute path where the file should be opened
        :param mode: The mode permissions bits for the new file
        """
        # TODO: jam 20060816 Paramiko >= 1.6.2 (probably earlier) supports
        #       using the 'x' flag to indicate SFTP_FLAG_EXCL.
        #       However, there is no way to set the permission mode at open
        #       time using the sftp_client.file() functionality.
        path = self._get_sftp()._adjust_cwd(abspath)
        # mutter('sftp abspath %s => %s', abspath, path)
        attr = SFTPAttributes()
        if mode is not None:
            attr.st_mode = mode
        omode = (SFTP_FLAG_WRITE | SFTP_FLAG_CREATE
                | SFTP_FLAG_TRUNC | SFTP_FLAG_EXCL)
        try:
            t, msg = self._get_sftp()._request(CMD_OPEN, path, omode, attr)
            if t != CMD_HANDLE:
                raise TransportError('Expected an SFTP handle')
            handle = msg.get_string()
            return SFTPFile(self._get_sftp(), handle, 'wb', -1)
        except (paramiko.SSHException, IOError), e:
            self._translate_io_exception(e, abspath, ': unable to open',
                failure_exc=FileExists)
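    # Illustrative use (assumed, not from the original source):
    #
    #   fout = t._sftp_open_exclusive('/home/user/foo.tmp', mode=0644)
    #
    # The SFTP_FLAG_EXCL bit in omode makes the open fail, surfacing as
    # FileExists, when the path already exists; _put relies on this to
    # create its temporary file safely.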
    def _can_roundtrip_unix_modebits(self):
        if sys.platform == 'win32':
            # anyone else?
            return False
        else:
            return True


# ------------- server test implementation --------------
from bzrlib.tests.stub_sftp import StubServer, StubSFTPServer

STUB_SERVER_KEY = """
-----BEGIN RSA PRIVATE KEY-----
MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
-----END RSA PRIVATE KEY-----
"""


class SocketListener(threading.Thread):

    def __init__(self, callback):
        threading.Thread.__init__(self)
        self._callback = callback
        self._socket = socket.socket()
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(('localhost', 0))
        self._socket.listen(1)
        self.port = self._socket.getsockname()[1]
        self._stop_event = threading.Event()
    def stop(self):
        # called from outside this thread
        self._stop_event.set()
        # use a timeout here, because if the test fails, the server thread may
        # never notice the stop_event.
        self.join(5.0)
        self._socket.close()
    def run(self):
        while True:
            readable, writable_unused, exception_unused = \
                select.select([self._socket], [], [], 0.1)
            if self._stop_event.isSet():
                return
            if len(readable) == 0:
                continue
            try:
                s, addr_unused = self._socket.accept()
                # because the loopback socket is inline, and transports are
                # never explicitly closed, best to launch a new thread.
                threading.Thread(target=self._callback, args=(s,)).start()
            except socket.error, x:
                sys.excepthook(*sys.exc_info())
                warning('Socket error during accept() within unit test server'
                        ' thread: %r' % x)
            except Exception, x:
                # probably a failed test; unit test thread will log the
                # failure/error
                sys.excepthook(*sys.exc_info())
                warning('Exception from within unit test server thread: %r' %
                        x)


class SocketDelay(object):
    """A socket decorator to make TCP appear slower.

    This changes recv, send, and sendall to add a fixed latency to each python
    call if a new roundtrip is detected. That is, when a recv is called and the
    flag new_roundtrip is set, latency is charged. Every send and send_all
    sets this flag.

    In addition every send, sendall and recv sleeps a bit per character sent to
    simulate bandwidth.

    Not all methods are implemented, this is deliberate as this class is not a
    replacement for the builtin sockets layer. fileno is not implemented to
    prevent the proxy being bypassed.
    """
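
    # A minimal usage sketch (ours, not from the original source):
    #
    #   slow = SocketDelay(sock, latency=0.1, bandwidth=8.0)
    #
    # charges 0.1s per detected roundtrip plus roughly one microsecond per
    # byte (time_per_byte = 1 / (8.0 / 8 * 1024 * 1024) seconds).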

    simulated_time = 0
    _proxied_arguments = dict.fromkeys([
        "close", "getpeername", "getsockname", "getsockopt", "gettimeout",
        "setblocking", "setsockopt", "settimeout", "shutdown"])

    def __init__(self, sock, latency, bandwidth=1.0,
                 really_sleep=True):
        """
        :param bandwidth: simulated bandwidth (MegaBit)
        :param really_sleep: If set to false, the SocketDelay will just
        increase a counter, instead of calling time.sleep. This is useful for
        unittesting the SocketDelay.
        """
        self.sock = sock
        self.latency = latency
        self.bandwidth = bandwidth
        self.really_sleep = really_sleep
        self.time_per_byte = 1 / (bandwidth / 8.0 * 1024 * 1024)
        self.new_roundtrip = False
    def sleep(self, s):
        if self.really_sleep:
            time.sleep(s)
        else:
            SocketDelay.simulated_time += s
    def __getattr__(self, attr):
        if attr in SocketDelay._proxied_arguments:
            return getattr(self.sock, attr)
        raise AttributeError("'SocketDelay' object has no attribute %r" %
                             attr)

    def dup(self):
        # pass the original bandwidth, not the derived time_per_byte, so the
        # duplicate is configured the same way as this socket
        return SocketDelay(self.sock.dup(), self.latency, self.bandwidth,
                           self.really_sleep)
    def recv(self, *args):
        data = self.sock.recv(*args)
        if data and self.new_roundtrip:
            self.new_roundtrip = False
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return data
    def sendall(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        self.sleep(len(data) * self.time_per_byte)
        return self.sock.sendall(data, flags)
    def send(self, data, flags=0):
        if not self.new_roundtrip:
            self.new_roundtrip = True
            self.sleep(self.latency)
        bytes_sent = self.sock.send(data, flags)
        self.sleep(bytes_sent * self.time_per_byte)
        return bytes_sent


class SFTPServer(Server):
    """Common code for SFTP server facilities."""

    def __init__(self):
        self._original_vendor = None
        self._homedir = None
        self._server_homedir = None
        self._listener = None
        self._root = None
        self._vendor = ssh.ParamikoVendor()
        # sftp server logs
        self.logs = []
        self.add_latency = 0
    def _get_sftp_url(self, path):
        """Calculate an sftp url to this server for path."""
        return 'sftp://foo:bar@localhost:%d/%s' % (self._listener.port, path)

    def log(self, message):
        """StubServer uses this to log when a new server is created."""
        self.logs.append(message)
    def _run_server_entry(self, sock):
        """Entry point for all implementations of _run_server.

        If self.add_latency is > 0.000001 then sock is given a latency adding
        decorator.
        """
        if self.add_latency > 0.000001:
            sock = SocketDelay(sock, self.add_latency)
        return self._run_server(sock)
    def _run_server(self, s):
        ssh_server = paramiko.Transport(s)
        key_file = pathjoin(self._homedir, 'test_rsa.key')
        f = open(key_file, 'w')
        f.write(STUB_SERVER_KEY)
        f.close()
        host_key = paramiko.RSAKey.from_private_key_file(key_file)
        ssh_server.add_server_key(host_key)
        server = StubServer(self)
        ssh_server.set_subsystem_handler('sftp', paramiko.SFTPServer,
                                         StubSFTPServer, root=self._root,
                                         home=self._server_homedir)
        event = threading.Event()
        ssh_server.start_server(event, server)
        event.wait(5.0)
    def setUp(self):
        self._original_vendor = ssh._ssh_vendor
        ssh._ssh_vendor = self._vendor
        if sys.platform == 'win32':
            # Win32 needs to use the UNICODE api
            self._homedir = getcwd()
        else:
            # But Linux SFTP servers should just deal in bytestreams
            self._homedir = os.getcwd()
        if self._server_homedir is None:
            self._server_homedir = self._homedir
        self._root = '/'
        if sys.platform == 'win32':
            self._root = ''
        self._listener = SocketListener(self._run_server_entry)
        self._listener.setDaemon(True)
        self._listener.start()
    def tearDown(self):
        """See bzrlib.transport.Server.tearDown."""
        self._listener.stop()
        ssh._ssh_vendor = self._original_vendor
    def get_bogus_url(self):
        """See bzrlib.transport.Server.get_bogus_url."""
        # this is chosen to try to prevent trouble with proxies, weird dns, etc
        # we bind a random socket, so that we get a guaranteed unused port
        # we just never listen on that port
        s = socket.socket()
        s.bind(('localhost', 0))
        return 'sftp://%s:%s/' % s.getsockname()


class SFTPFullAbsoluteServer(SFTPServer):
    """A test server for sftp transports, using absolute urls and ssh."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        return self._get_sftp_url(urlutils.escape(self._homedir[1:]))


class SFTPServerWithoutSSH(SFTPServer):
    """An SFTP server that uses a simple TCP socket pair rather than SSH."""

    def __init__(self):
        super(SFTPServerWithoutSSH, self).__init__()
        self._vendor = ssh.LoopbackVendor()
    def _run_server(self, sock):
        # Re-import these as locals, so that they're still accessible during
        # interpreter shutdown (when all module globals get set to None, leading
        # to confusing errors like "'NoneType' object has no attribute 'error'".
        class FakeChannel(object):
            def get_transport(self):
                return self
            def get_log_channel(self):
                return 'paramiko'
            def get_name(self):
                return '1'
            def get_hexdump(self):
                return False
            def close(self):
                pass

        server = paramiko.SFTPServer(FakeChannel(), 'sftp', StubServer(self), StubSFTPServer,
                                     root=self._root, home=self._server_homedir)
        try:
            server.start_subsystem('sftp', None, sock)
        except socket.error, e:
            if (len(e.args) > 0) and (e.args[0] == errno.EPIPE):
                # it's okay for the client to disconnect abruptly
                # (bug in paramiko 1.6: it should absorb this exception)
                pass
            else:
                raise
        except Exception, e:
            import sys; sys.stderr.write('\nEXCEPTION %r\n\n' % e.__class__)
        server.finish_subsystem()


class SFTPAbsoluteServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using absolute urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        if sys.platform == 'win32':
            return self._get_sftp_url(urlutils.escape(self._homedir))
        else:
            return self._get_sftp_url(urlutils.escape(self._homedir[1:]))


class SFTPHomeDirServer(SFTPServerWithoutSSH):
    """A test server for sftp transports, using homedir relative urls."""

    def get_url(self):
        """See bzrlib.transport.Server.get_url."""
        return self._get_sftp_url("~/")


class SFTPSiblingAbsoluteServer(SFTPAbsoluteServer):
    """A test server for sftp transports, using absolute urls to non-home."""

    def setUp(self):
        self._server_homedir = '/dev/noone/runs/tests/here'
        super(SFTPSiblingAbsoluteServer, self).setUp()


def _sftp_connect(host, port, username, password):
    """Connect to the remote sftp server.

    :raises: a TransportError 'could not connect'.

    :returns: a paramiko.sftp_client.SFTPClient

    TODO: Raise a more reasonable ConnectionFailed exception
    """
    idx = (host, port, username)
    try:
        return _connected_hosts[idx]
    except KeyError:
        pass

    sftp = _sftp_connect_uncached(host, port, username, password)
    _connected_hosts[idx] = sftp
    return sftp


def _sftp_connect_uncached(host, port, username, password):
    vendor = ssh._get_ssh_vendor()
    sftp = vendor.connect_sftp(username, password, host, port)
    return sftp


def get_test_permutations():
    """Return the permutations to be used in testing."""
    return [(SFTPTransport, SFTPAbsoluteServer),
            (SFTPTransport, SFTPHomeDirServer),
            (SFTPTransport, SFTPSiblingAbsoluteServer),
            ]