# Copyright (C) 2005-2011, 2016, 2017 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Implementation of Transport over SFTP, using paramiko."""

from __future__ import absolute_import

# TODO: Remove the transport-based lock_read and lock_write methods. They'll
# then raise TransportNotPossible, which will break remote access to any
# formats which rely on OS-level locks. That should be fine as those formats
# are pretty old, but these combinations may have to be removed from the test
# suite. Those formats all date back to 0.7; so we should be able to remove
# these methods when we officially drop support for those formats.
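
# A minimal usage sketch (illustrative only; the host, user and path below are
# hypothetical): this transport is normally obtained through the transport
# registry rather than instantiated directly, e.g.
#
#   from breezy import transport
#   t = transport.get_transport('sftp://user@example.com/srv/repo')
#   if t.has('README'):
#       data = t.get_bytes('README')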

import bisect
import errno
import itertools
import os
import random
import stat
import sys
import time
import traceback
import warnings

from .. import (
    config,
    debug,
    errors,
    urlutils,
    )
from ..errors import (FileExists,
                      LockError,
                      NoSuchFile,
                      ParamikoNotPresent,
                      PathError,
                      TransportError,
                      )
from ..osutils import fancy_rename
from ..sixish import (
    zip,
    )
from ..trace import mutter, warning
from ..transport import (
    ConnectedTransport,
    FileFileStream,
    _file_streams,
    ssh,
    )
# Disable one particular warning that comes from paramiko in Python2.5; if
64
# this is emitted at the wrong time it tends to cause spurious test failures
65
# or at least noise in the test case::
67
# [1770/7639 in 86s, 1 known failures, 50 skipped, 2 missing features]
68
# test_permissions.TestSftpPermissions.test_new_files
69
# /var/lib/python-support/python2.5/paramiko/message.py:226: DeprecationWarning: integer argument expected, got float
70
# self.packet.write(struct.pack('>I', n))
71
warnings.filterwarnings('ignore',
72
'integer argument expected, got float',
73
category=DeprecationWarning,
74
module='paramiko.message')

try:
    import paramiko
except ImportError as e:
    raise ParamikoNotPresent(e)

from paramiko.sftp import (SFTP_FLAG_WRITE, SFTP_FLAG_CREATE,
                           SFTP_FLAG_EXCL, SFTP_FLAG_TRUNC,
                           SFTP_OK, CMD_HANDLE, CMD_OPEN)
from paramiko.sftp_attr import SFTPAttributes
from paramiko.sftp_file import SFTPFile

# GZ 2017-05-25: Some dark hackery to monkeypatch out issues with paramiko's
# Python 3 compatibility code. Replace broken b() and asbytes() code.
from paramiko.py3compat import b as _bad
from paramiko.common import asbytes as _bad_asbytes


def _b_for_broken_paramiko(s, encoding='utf8'):
    """Hacked b() that does not raise TypeError."""
    # https://github.com/paramiko/paramiko/issues/967
    if not isinstance(s, bytes):
        encode = getattr(s, 'encode', None)
        if encode is not None:
            return encode(encoding)
        # Would like to pass buffer objects along, but have to realise.
        tobytes = getattr(s, 'tobytes', None)
        if tobytes is not None:
            return tobytes()
    return s


def _asbytes_for_broken_paramiko(s):
    """Hacked asbytes() that does not raise Exception."""
    # https://github.com/paramiko/paramiko/issues/968
    if not isinstance(s, bytes):
        encode = getattr(s, 'encode', None)
        if encode is not None:
            return encode('utf8')
        asbytes = getattr(s, 'asbytes', None)
        if asbytes is not None:
            return asbytes()
    return s


_bad.__code__ = _b_for_broken_paramiko.__code__
_bad_asbytes.__code__ = _asbytes_for_broken_paramiko.__code__


class SFTPLock(object):
    """This fakes a lock in a remote location.

    A present lock is indicated just by the existence of a file. This
    doesn't work well on all transports and they are only used in
    deprecated storage formats.
    """

    __slots__ = ['path', 'lock_path', 'lock_file', 'transport']

    def __init__(self, path, transport):
        self.lock_file = None
        self.path = path
        self.lock_path = path + '.write-lock'
        self.transport = transport
        try:
            # RBC 20060103 FIXME should we be using private methods here ?
            abspath = transport._remote_path(self.lock_path)
            self.lock_file = transport._sftp_open_exclusive(abspath)
        except FileExists:
            raise LockError('File %r already locked' % (self.path,))

    def unlock(self):
        if not self.lock_file:
            return
        self.lock_file.close()
        self.lock_file = None
        try:
            self.transport.delete(self.lock_path)
        except (NoSuchFile,):
            # What specific errors should we catch here?
            pass


class _SFTPReadvHelper(object):
    """A class to help with managing the state of a readv request."""

    # See _get_requests for an explanation.
    _max_request_size = 32768

    def __init__(self, original_offsets, relpath, _report_activity):
        """Create a new readv helper.

        :param original_offsets: The original requests given by the caller of
            readv()
        :param relpath: The name of the file (if known)
        :param _report_activity: A Transport._report_activity bound method,
            to be called as data arrives.
        """
        self.original_offsets = list(original_offsets)
        self.relpath = relpath
        self._report_activity = _report_activity

    def _get_requests(self):
        """Break up the offsets into individual requests over sftp.

        The SFTP spec only requires implementers to support 32kB requests. We
        could try something larger (openssh supports 64kB), but then we have to
        handle requests that fail.
        So instead, we just break up our maximum chunks into 32kB chunks, and
        asynchronously request them.
        Newer versions of paramiko would do the chunking for us, but we want to
        start processing results right away, so we do it ourselves.
        """
        # TODO: Because we issue async requests, we don't 'fudge' any extra
        #       data. I'm not 100% sure that is the best choice.

        # The first thing we do, is to collapse the individual requests as much
        # as possible, so we don't issue requests <32kB
        sorted_offsets = sorted(self.original_offsets)
        coalesced = list(ConnectedTransport._coalesce_offsets(sorted_offsets,
                                                              limit=0, fudge_factor=0))
        requests = []
        for c_offset in coalesced:
            start = c_offset.start
            size = c_offset.length

            # Break this up into 32kB requests
            while size > 0:
                next_size = min(size, self._max_request_size)
                requests.append((start, next_size))
                size -= next_size
                start += next_size
        if 'sftp' in debug.debug_flags:
            mutter('SFTP.readv(%s) %s offsets => %s coalesced => %s requests',
                   self.relpath, len(sorted_offsets), len(coalesced),
                   len(requests))
        return requests
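
    # Illustrative example (hypothetical numbers): a single coalesced range of
    # 100000 bytes starting at offset 0 is broken up into the requests
    #   [(0, 32768), (32768, 32768), (65536, 32768), (98304, 1696)]
    # so that no single request exceeds the 32kB the SFTP spec guarantees.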

    def request_and_yield_offsets(self, fp):
        """Request the data from the remote machine, yielding the results.

        :param fp: A Paramiko SFTPFile object that supports readv.
        :return: Yield the data requested by the original readv caller, one by
            one.
        """
        requests = self._get_requests()
        offset_iter = iter(self.original_offsets)
        cur_offset, cur_size = next(offset_iter)
        # paramiko .readv() yields strings that are in the order of the requests
        # So we track the current request to know where the next data is
        # being returned from.
        input_start = None
        last_end = None
        buffered_data = []
        buffered_len = 0

        # This is used to buffer chunks which we couldn't process yet
        # It is (start, end, data) tuples.
        data_chunks = []
        # Create an 'unlimited' data stream, so we stop based on requests,
        # rather than just because the data stream ended. This lets us detect
        # short readv.
        data_stream = itertools.chain(fp.readv(requests),
                                      itertools.repeat(None))
        for (start, length), data in zip(requests, data_stream):
            if data is None:
                # The data stream ran dry before every request was satisfied,
                # so this is a short read.
                raise errors.ShortReadvError(self.relpath,
                                             start, length, 0)
            if len(data) != length:
                raise errors.ShortReadvError(self.relpath,
                                             start, length, len(data))
            self._report_activity(length, 'read')
            if last_end is None:
                # This is the first request, just buffer it
                buffered_data = [data]
                buffered_len = length
                input_start = start
            elif start == last_end:
                # The data we are reading fits neatly on the previous
                # buffer, so this is all part of a larger coalesced range.
                buffered_data.append(data)
                buffered_len += length
            else:
                # We have an 'interrupt' in the data stream. So we know we are
                # at a request boundary.
                if buffered_len > 0:
                    # We haven't consumed the buffer so far, so put it into
                    # data_chunks, and continue.
                    buffered = b''.join(buffered_data)
                    data_chunks.append((input_start, buffered))
                input_start = start
                buffered_data = [data]
                buffered_len = length
            last_end = start + length
            if input_start == cur_offset and cur_size <= buffered_len:
                # Simplify the next steps a bit by transforming buffered_data
                # into a single string. We also have the nice property that
                # when there is only one string ''.join([x]) == x, so there is
                # no data copying.
                buffered = b''.join(buffered_data)
                # Clean out buffered data so that we keep memory
                # consumption low
                del buffered_data[:]
                buffered_offset = 0
                # TODO: We *could* also consider the case where cur_offset is in
                #       in the buffered range, even though it doesn't *start*
                #       the buffered range. But for packs we pretty much always
                #       read in order, so you won't get any extra data in the
                #       middle.
                while (input_start == cur_offset
                       and (buffered_offset + cur_size) <= buffered_len):
                    # We've buffered enough data to process this request, spit it
                    # out
                    cur_data = buffered[buffered_offset:buffered_offset + cur_size]
                    # move the direct pointer into our buffered data
                    buffered_offset += cur_size
                    # Move the start-of-buffer pointer
                    input_start += cur_size
                    # Yield the requested data
                    yield cur_offset, cur_data
                    try:
                        cur_offset, cur_size = next(offset_iter)
                    except StopIteration:
                        # All of the original offsets have been satisfied.
                        return
                # at this point, we've consumed as much of buffered as we can,
                # so break off the portion that we consumed
                if buffered_offset == len(buffered):
                    # No tail to leave behind
                    buffered_data = []
                    buffered_len = 0
                else:
                    buffered = buffered[buffered_offset:]
                    buffered_data = [buffered]
                    buffered_len = len(buffered)
        # now that the data stream is done, close the handle
        fp.close()
        if buffered_len:
            buffered = b''.join(buffered_data)
            del buffered_data[:]
            data_chunks.append((input_start, buffered))
        if data_chunks:
            if 'sftp' in debug.debug_flags:
                mutter('SFTP readv left with %d out-of-order bytes',
                       sum(len(x[1]) for x in data_chunks))
            # We've processed all the readv data, at this point, anything we
            # couldn't process is in data_chunks. This doesn't happen often, so
            # this code path isn't optimized
            # We use an interesting process for data_chunks
            # Specifically if we have "bisect_left([(start, len, entries)],
            #                                      (qstart,))"
            # If start == qstart, then we get the specific node. Otherwise we
            # get the previous node
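            # For example (hypothetical values): with
            #   data_chunks = [(0, b'abcd'), (100, b'wxyz')]
            # bisect_left(data_chunks, (100,)) == 1, the chunk that starts at
            # 100, while bisect_left(data_chunks, (102,)) == 2, so we step back
            # one entry and slice into the chunk starting at 100 instead.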
            while True:
                idx = bisect.bisect_left(data_chunks, (cur_offset,))
                if idx < len(data_chunks) and data_chunks[idx][0] == cur_offset:
                    # The data starts here
                    data = data_chunks[idx][1][:cur_size]
                elif idx > 0:
                    # The data is in a portion of a previous page
                    idx -= 1
                    sub_offset = cur_offset - data_chunks[idx][0]
                    data = data_chunks[idx][1]
                    data = data[sub_offset:sub_offset + cur_size]
                else:
                    # We are missing the page where the data should be found,
                    # so there is nothing more we can return; stop here.
                    return
                if len(data) != cur_size:
                    raise AssertionError('We must have miscalculated.'
                                         ' We expected %d bytes, but only found %d'
                                         % (cur_size, len(data)))
                yield cur_offset, data
                try:
                    cur_offset, cur_size = next(offset_iter)
                except StopIteration:
                    # All of the original offsets have been satisfied.
                    return


class SFTPTransport(ConnectedTransport):
    """Transport implementation for SFTP access."""

    # TODO: jam 20060717 Conceivably these could be configurable, either
    #       by auto-tuning at run-time, or by a configuration (per host??)
    #       but the performance curve is pretty flat, so just going with
    #       reasonable defaults.
    _max_readv_combine = 200
    # Having to round trip to the server means waiting for a response,
    # so it is better to download extra bytes.
    # 8KiB had good performance for both local and remote network operations
    _bytes_to_read_before_seek = 8192

    # The sftp spec says that implementations SHOULD allow reads
    # to be at least 32K. paramiko.readv() does an async request
    # for the chunks. So we need to keep it within a single request
    # size for paramiko <= 1.6.1. paramiko 1.6.2 will probably chop
    # up the request itself, rather than us having to worry about it
    _max_request_size = 32768

    def _remote_path(self, relpath):
        """Return the path to be passed along the sftp protocol for relpath.

        :param relpath: is a urlencoded string.
        """
        remote_path = self._parsed_url.clone(relpath).path
        # the initial slash should be removed from the path, and treated as a
        # homedir relative path (the path begins with a double slash if it is
        # absolute). see draft-ietf-secsh-scp-sftp-ssh-uri-03.txt
        # RBC 20060118 we are not using this as its too user hostile. instead
        # we are following lftp and using /~/foo to mean '~/foo'
        # vila--20070602 and leave absolute paths begin with a single slash.
        if remote_path.startswith('/~/'):
            remote_path = remote_path[3:]
        elif remote_path == '/~':
            remote_path = ''
        return remote_path
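
    # Examples of the mapping described above (hypothetical URLs):
    #   sftp://host/~/repo   -> 'repo'      (relative to the remote home dir)
    #   sftp://host/~        -> ''          (the remote home dir itself)
    #   sftp://host/srv/repo -> '/srv/repo' (absolute path on the server)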

    def _create_connection(self, credentials=None):
        """Create a new connection with the provided credentials.

        :param credentials: The credentials needed to establish the connection.

        :return: The created connection and its associated credentials.

        The credentials are only the password as it may have been entered
        interactively by the user and may be different from the one provided
        in base url at transport creation time.
        """
        if credentials is None:
            password = self._parsed_url.password
        else:
            password = credentials

        vendor = ssh._get_ssh_vendor()
        user = self._parsed_url.user
        if user is None:
            auth = config.AuthenticationConfig()
            user = auth.get_user('ssh', self._parsed_url.host,
                                 self._parsed_url.port)
        connection = vendor.connect_sftp(self._parsed_url.user, password,
                                         self._parsed_url.host, self._parsed_url.port)
        return connection, (user, password)

    def disconnect(self):
        connection = self._get_connection()
        if connection is not None:
            connection.close()

    def _get_sftp(self):
        """Ensures that a connection is established"""
        connection = self._get_connection()
        if connection is None:
            # First connection ever
            connection, credentials = self._create_connection()
            self._set_connection(connection, credentials)
        return connection

    def has(self, relpath):
        """
        Does the target location exist?
        """
        try:
            self._get_sftp().stat(self._remote_path(relpath))
            # stat result is about 20 bytes, let's say
            self._report_activity(20, 'read')
            return True
        except IOError:
            return False

    def get(self, relpath):
        """Get the file at the given relative path.

        :param relpath: The relative path to the file
        """
        try:
            path = self._remote_path(relpath)
            f = self._get_sftp().file(path, mode='rb')
            size = f.stat().st_size
            if getattr(f, 'prefetch', None) is not None:
                f.prefetch(size)
            return f
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, path, ': error retrieving',
                                         failure_exc=errors.ReadError)

    def get_bytes(self, relpath):
        # reimplement this here so that we can report how many bytes came back
        with self.get(relpath) as f:
            bytes = f.read()
            self._report_activity(len(bytes), 'read')
            return bytes

    def _readv(self, relpath, offsets):
        """See Transport.readv()"""
        # We overload the default readv() because we want to use a file
        # that does not have prefetch enabled.
        # Also, if we have a new paramiko, it implements an async readv()
        if not offsets:
            return

        try:
            path = self._remote_path(relpath)
            fp = self._get_sftp().file(path, mode='rb')
            readv = getattr(fp, 'readv', None)
            if readv:
                return self._sftp_readv(fp, offsets, relpath)
            if 'sftp' in debug.debug_flags:
                mutter('seek and read %s offsets', len(offsets))
            return self._seek_and_read(fp, offsets, relpath)
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, path, ': error retrieving')

    def recommended_page_size(self):
        """See Transport.recommended_page_size().

        For SFTP we suggest a large page size to reduce the overhead
        introduced by latency.
        """
        return 64 * 1024

    def _sftp_readv(self, fp, offsets, relpath):
        """Use the readv() member of fp to do async readv.

        Then read them using paramiko.readv(). paramiko.readv()
        does not support ranges > 64K, so it caps the request size, and
        just reads until it gets all the stuff it wants.
        """
        helper = _SFTPReadvHelper(offsets, relpath, self._report_activity)
        return helper.request_and_yield_offsets(fp)

    def put_file(self, relpath, f, mode=None):
        """
        Copy the file-like object into the location.

        :param relpath: Location to put the contents, relative to base.
        :param f: File-like object.
        :param mode: The final mode for the file
        """
        final_path = self._remote_path(relpath)
        return self._put(final_path, f, mode=mode)

    def _put(self, abspath, f, mode=None):
        """Helper function so both put() and copy_abspaths can reuse the code"""
        tmp_abspath = '%s.tmp.%.9f.%d.%d' % (abspath, time.time(),
                                             os.getpid(), random.randint(0, 0x7FFFFFFF))
        fout = self._sftp_open_exclusive(tmp_abspath, mode=mode)
        closed = False
        try:
            try:
                fout.set_pipelined(True)
                length = self._pump(f, fout)
            except (IOError, paramiko.SSHException) as e:
                self._translate_io_exception(e, tmp_abspath)
            # XXX: This doesn't truly help like we would like it to.
            #      The problem is that openssh strips sticky bits. So while we
            #      can properly set group write permission, we lose the group
            #      sticky bit. So it is probably best to stop chmodding, and
            #      just tell users that they need to set the umask correctly.
            #      The attr.st_mode = mode, in _sftp_open_exclusive
            #      will handle when the user wants the final mode to be more
            #      restrictive. And then we avoid a round trip. Unless
            #      paramiko decides to expose an async chmod()

            # This is designed to chmod() right before we close.
            # Because we set_pipelined() earlier, theoretically we might
            # avoid the round trip for fout.close()
            if mode is not None:
                self._get_sftp().chmod(tmp_abspath, mode)
            fout.close()
            closed = True
            self._rename_and_overwrite(tmp_abspath, abspath)
            return length
        except Exception as e:
            # If we fail, try to clean up the temporary file
            # before we throw the exception
            # but don't let another exception mess things up
            # Write out the traceback, because otherwise
            # the catch and throw destroys it
            mutter(traceback.format_exc())
            try:
                if not closed:
                    fout.close()
                self._get_sftp().remove(tmp_abspath)
            except Exception:
                # raise the saved except
                raise e
            # raise the original with its traceback if we can.
            raise
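
    # Illustrative note (hypothetical values): for abspath '/srv/repo/foo' the
    # temporary name generated above looks like
    # '/srv/repo/foo.tmp.1234567890.123456789.4242.987654', and the final
    # _rename_and_overwrite() is what makes put_file effectively atomic for
    # readers of the destination path.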

    def _put_non_atomic_helper(self, relpath, writer, mode=None,
                               create_parent_dir=False,
                               dir_mode=None):
        abspath = self._remote_path(relpath)

        # TODO: jam 20060816 paramiko doesn't publicly expose a way to
        # set the file mode at create time. If it does, use it.
        # But for now, we just chmod later anyway.

        def _open_and_write_file():
            """Try to open the target file, raise error on failure"""
            fout = None
            try:
                try:
                    fout = self._get_sftp().file(abspath, mode='wb')
                    fout.set_pipelined(True)
                    writer(fout)
                except (paramiko.SSHException, IOError) as e:
                    self._translate_io_exception(e, abspath,
                                                 ': unable to open')

                # This is designed to chmod() right before we close.
                # Because we set_pipelined() earlier, theoretically we might
                # avoid the round trip for fout.close()
                if mode is not None:
                    self._get_sftp().chmod(abspath, mode)
            finally:
                if fout is not None:
                    fout.close()

        if not create_parent_dir:
            _open_and_write_file()
            return

        try:
            # Try error handling to create the parent directory if we need to
            _open_and_write_file()
        except NoSuchFile:
            # Try to create the parent directory, and then go back to
            # writing the file:
            parent_dir = os.path.dirname(abspath)
            self._mkdir(parent_dir, dir_mode)
            _open_and_write_file()

    def put_file_non_atomic(self, relpath, f, mode=None,
                            create_parent_dir=False,
                            dir_mode=None):
        """Copy the file-like object into the target location.

        This function is not strictly safe to use. It is only meant to
        be used when you already know that the target does not exist.
        It is not safe, because it will open and truncate the remote
        file. So there may be a time when the file has invalid contents.

        :param relpath: The remote location to put the contents.
        :param f: File-like object.
        :param mode: Possible access permissions for new file.
            None means do not set remote permissions.
        :param create_parent_dir: If we cannot create the target file because
            the parent directory does not exist, go ahead and
            create it, and then try again.
        """
        def writer(fout):
            self._pump(f, fout)
        self._put_non_atomic_helper(relpath, writer, mode=mode,
                                    create_parent_dir=create_parent_dir,
                                    dir_mode=dir_mode)

    def put_bytes_non_atomic(self, relpath, raw_bytes, mode=None,
                             create_parent_dir=False,
                             dir_mode=None):
        if not isinstance(raw_bytes, bytes):
            raise TypeError(
                'raw_bytes must be a plain string, not %s' % type(raw_bytes))

        def writer(fout):
            fout.write(raw_bytes)
        self._put_non_atomic_helper(relpath, writer, mode=mode,
                                    create_parent_dir=create_parent_dir,
                                    dir_mode=dir_mode)

    def iter_files_recursive(self):
        """Walk the relative paths of all files in this transport."""
        # progress is handled by list_dir
        queue = list(self.list_dir('.'))
        while queue:
            relpath = queue.pop(0)
            st = self.stat(relpath)
            if stat.S_ISDIR(st.st_mode):
                for i, basename in enumerate(self.list_dir(relpath)):
                    queue.insert(i, relpath + '/' + basename)
            else:
                yield relpath

    def _mkdir(self, abspath, mode=None):
        if mode is None:
            # rely on the server-side umask for the default
            local_mode = 0o777
        else:
            local_mode = mode
        try:
            self._report_activity(len(abspath), 'write')
            self._get_sftp().mkdir(abspath, local_mode)
            self._report_activity(1, 'read')
            if mode is not None:
                # chmod a dir through sftp will erase any sgid bit set
                # on the server side. So, if the mode bits are already
                # set, avoid the chmod. If the mode is not fine but
                # the sgid bit is set, report a warning to the user
                # with the umask fix.
                stat = self._get_sftp().lstat(abspath)
                mode = mode & 0o777  # can't set special bits anyway
                if mode != stat.st_mode & 0o777:
                    if stat.st_mode & 0o6000:
                        warning('About to chmod %s over sftp, which will result'
                                ' in its suid or sgid bits being cleared. If'
                                ' you want to preserve those bits, change your '
                                ' environment on the server to use umask 0%03o.'
                                % (abspath, 0o777 - mode))
                    self._get_sftp().chmod(abspath, mode=mode)
        except (paramiko.SSHException, IOError) as e:
            self._translate_io_exception(e, abspath, ': unable to mkdir',
                                         failure_exc=FileExists)
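
    # Worked example for the warning above (hypothetical mode): if the caller
    # asked for mode 0o775, the suggested umask is 0o777 - 0o775 = 0o002,
    # i.e. setting "umask 0002" on the server yields the desired permissions
    # without the client having to chmod (and thereby clear suid/sgid bits).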

    def mkdir(self, relpath, mode=None):
        """Create a directory at the given path."""
        self._mkdir(self._remote_path(relpath), mode=mode)

    def open_write_stream(self, relpath, mode=None):
        """See Transport.open_write_stream."""
        # initialise the file to zero-length
        # this is three round trips, but we don't use this
        # api more than once per write_group at the moment so
        # it is a tolerable overhead. Better would be to truncate
        # the file after opening. RBC 20070805
        self.put_bytes_non_atomic(relpath, b"", mode)
        abspath = self._remote_path(relpath)
        # TODO: jam 20060816 paramiko doesn't publicly expose a way to
        # set the file mode at create time. If it does, use it.
        # But for now, we just chmod later anyway.
        try:
            handle = self._get_sftp().file(abspath, mode='wb')
            handle.set_pipelined(True)
        except (paramiko.SSHException, IOError) as e:
            self._translate_io_exception(e, abspath,
                                         ': unable to open')
        _file_streams[self.abspath(relpath)] = handle
        return FileFileStream(self, relpath, handle)

    def _translate_io_exception(self, e, path, more_info='',
                                failure_exc=PathError):
        """Translate a paramiko or IOError into a friendlier exception.

        :param e: The original exception
        :param path: The path in question when the error is raised
        :param more_info: Extra information that can be included,
            such as what was going on
        :param failure_exc: Paramiko has the super fun ability to raise completely
            opaque errors that just set "e.args = ('Failure',)" with
            no more information.
            If this parameter is set, it defines the exception
            to raise in these cases.
        """
        # paramiko seems to generate detailless errors.
        self._translate_error(e, path, raise_generic=False)
        if getattr(e, 'args', None) is not None:
            if (e.args == ('No such file or directory',) or
                    e.args == ('No such file',)):
                raise NoSuchFile(path, str(e) + more_info)
            if (e.args == ('mkdir failed',) or
                    e.args[0].startswith('syserr: File exists')):
                raise FileExists(path, str(e) + more_info)
            # strange but true, for the paramiko server.
            if (e.args == ('Failure',)):
                raise failure_exc(path, str(e) + more_info)
            # Can be something like args = ('Directory not empty:
            # '/srv/bazaar.launchpad.net/blah...: '
            # [Errno 39] Directory not empty',)
            if (e.args[0].startswith('Directory not empty: ')
                    or getattr(e, 'errno', None) == errno.ENOTEMPTY):
                raise errors.DirectoryNotEmpty(path, str(e))
            if e.args == ('Operation unsupported',):
                raise errors.TransportNotPossible()
            mutter('Raising exception with args %s', e.args)
        if getattr(e, 'errno', None) is not None:
            mutter('Raising exception with errno %s', e.errno)
        raise e

    def append_file(self, relpath, f, mode=None):
        """
        Append the text in the file-like object into the final
        location.
        """
        try:
            path = self._remote_path(relpath)
            fout = self._get_sftp().file(path, 'ab')
            if mode is not None:
                self._get_sftp().chmod(path, mode)
            result = fout.tell()
            self._pump(f, fout)
            return result
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, relpath, ': unable to append')

    def rename(self, rel_from, rel_to):
        """Rename without special overwriting"""
        try:
            self._get_sftp().rename(self._remote_path(rel_from),
                                    self._remote_path(rel_to))
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, rel_from,
                                         ': unable to rename to %r' % (rel_to))

    def _rename_and_overwrite(self, abs_from, abs_to):
        """Do a fancy rename on the remote server.

        Using the implementation provided by osutils.
        """
        try:
            sftp = self._get_sftp()
            fancy_rename(abs_from, abs_to,
                         rename_func=sftp.rename,
                         unlink_func=sftp.remove)
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, abs_from,
                                         ': unable to rename to %r' % (abs_to))

    def move(self, rel_from, rel_to):
        """Move the item at rel_from to the location at rel_to"""
        path_from = self._remote_path(rel_from)
        path_to = self._remote_path(rel_to)
        self._rename_and_overwrite(path_from, path_to)

    def delete(self, relpath):
        """Delete the item at relpath"""
        path = self._remote_path(relpath)
        try:
            self._get_sftp().remove(path)
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, path, ': unable to delete')

    def external_url(self):
        """See breezy.transport.Transport.external_url."""
        # the external path for SFTP is the base
        return self.base

    def listable(self):
        """Return True if this store supports listing."""
        return True

    def list_dir(self, relpath):
        """
        Return a list of all files at the given location.
        """
        # does anything actually use this?
        # This is at least used by copy_tree for remote upgrades.
        # -- David Allouche 2006-08-11
        path = self._remote_path(relpath)
        try:
            entries = self._get_sftp().listdir(path)
            self._report_activity(sum(map(len, entries)), 'read')
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, path, ': failed to list_dir')
        return [urlutils.escape(entry) for entry in entries]

    def rmdir(self, relpath):
        """See Transport.rmdir."""
        path = self._remote_path(relpath)
        try:
            return self._get_sftp().rmdir(path)
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, path, ': failed to rmdir')

    def stat(self, relpath):
        """Return the stat information for a file."""
        path = self._remote_path(relpath)
        try:
            return self._get_sftp().lstat(path)
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, path, ': unable to stat')

    def readlink(self, relpath):
        """See Transport.readlink."""
        path = self._remote_path(relpath)
        try:
            return self._get_sftp().readlink(path)
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, path, ': unable to readlink')

    def symlink(self, source, link_name):
        """See Transport.symlink."""
        try:
            conn = self._get_sftp()
            sftp_retval = conn.symlink(source, link_name)
            if SFTP_OK != sftp_retval:
                raise TransportError(
                    '%r: unable to create symlink to %r' % (link_name, source))
        except (IOError, paramiko.SSHException) as e:
            self._translate_io_exception(e, link_name,
                                         ': unable to create symlink to %r' % (source))

    def lock_read(self, relpath):
        """
        Lock the given file for shared (read) access.
        :return: A lock object, which has an unlock() member function
        """
        # FIXME: there should be something clever i can do here...
        class BogusLock(object):
            def __init__(self, path):
                self.path = path

            def unlock(self):
                pass

            def __exit__(self, exc_type, exc_val, exc_tb):
                return False

            def __enter__(self):
                return self

        return BogusLock(relpath)

    def lock_write(self, relpath):
        """
        Lock the given file for exclusive (write) access.
        WARNING: many transports do not support this, so try to avoid using it.

        :return: A lock object, which has an unlock() member function
        """
        # This is a little bit bogus, but basically, we create a file
        # which should not already exist, and if it does, we assume
        # that there is a lock, and if it doesn't, then we assume
        # that we have taken the lock.
        return SFTPLock(relpath, self)

    def _sftp_open_exclusive(self, abspath, mode=None):
        """Open a remote path exclusively.

        SFTP supports O_EXCL (SFTP_FLAG_EXCL), which fails if
        the file already exists. However it does not expose this
        at the higher level of SFTPClient.open(), so we have to
        use the lower-level request machinery directly.

        WARNING: This breaks the SFTPClient abstraction, so it
        could easily break against an updated version of paramiko.

        :param abspath: The remote absolute path where the file should be opened
        :param mode: The mode permissions bits for the new file
        """
        # TODO: jam 20060816 Paramiko >= 1.6.2 (probably earlier) supports
        # using the 'x' flag to indicate SFTP_FLAG_EXCL.
        # However, there is no way to set the permission mode at open
        # time using the sftp_client.file() functionality.
        path = self._get_sftp()._adjust_cwd(abspath)
        # mutter('sftp abspath %s => %s', abspath, path)
        attr = SFTPAttributes()
        if mode is not None:
            attr.st_mode = mode
        omode = (SFTP_FLAG_WRITE | SFTP_FLAG_CREATE
                 | SFTP_FLAG_TRUNC | SFTP_FLAG_EXCL)
        try:
            t, msg = self._get_sftp()._request(CMD_OPEN, path, omode, attr)
            if t != CMD_HANDLE:
                raise TransportError('Expected an SFTP handle')
            handle = msg.get_string()
            return SFTPFile(self._get_sftp(), handle, 'wb', -1)
        except (paramiko.SSHException, IOError) as e:
            self._translate_io_exception(e, abspath, ': unable to open',
                                         failure_exc=FileExists)

    def _can_roundtrip_unix_modebits(self):
        if sys.platform == 'win32':
            return False
        else:
            return True


def get_test_permutations():
    """Return the permutations to be used in testing."""
    from ..tests import stub_sftp
    return [(SFTPTransport, stub_sftp.SFTPAbsoluteServer),
            (SFTPTransport, stub_sftp.SFTPHomeDirServer),
            (SFTPTransport, stub_sftp.SFTPSiblingAbsoluteServer),
            ]