"""A collection of functions for handling URL operations."""

from __future__ import absolute_import

import sys

from urllib import parse as urlparse

from . import (
    errors,
    osutils,
    )
from .sixish import (
    int2byte,
    PY3,
    text_type,
    unichr,
    )

from .lazy_import import lazy_import
lazy_import(globals(), """
from posixpath import split as _posix_split
""")

class InvalidURL(errors.PathError):

    _fmt = 'Invalid url supplied to transport: "%(path)s"%(extra)s'


class InvalidURLJoin(errors.PathError):

    _fmt = "Invalid URL join request: %(reason)s: %(base)r + %(join_args)r"

    def __init__(self, reason, base, join_args):
        self.reason = reason
        self.base = base
        self.join_args = join_args
        errors.PathError.__init__(self, base, reason)


class InvalidRebaseURLs(errors.PathError):

    _fmt = "URLs differ by more than path: %(from_)r and %(to)r"

    def __init__(self, from_, to):
        self.from_ = from_
        self.to = to
        errors.PathError.__init__(
            self, from_, 'URLs differ by more than path.')

def basename(url, exclude_trailing_slash=True):
    """Return the last component of a URL."""
    return split(url, exclude_trailing_slash=exclude_trailing_slash)[1]


def dirname(url, exclude_trailing_slash=True):
    """Return the parent directory of the given path."""
    return split(url, exclude_trailing_slash=exclude_trailing_slash)[0]
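
# Illustrative usage (not part of the original module; shown for clarity,
# assuming split() behaves as documented above):
#   basename('http://host/path/to/foo')   -> 'foo'
#   basename('http://host/path/to/foo/')  -> 'foo'
#   dirname('http://host/path/to/foo')    -> 'http://host/path/to'
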
if PY3:
    quote_from_bytes = urlparse.quote_from_bytes
    quote = urlparse.quote
    unquote_to_bytes = urlparse.unquote_to_bytes
else:
    # Private copies of quote and unquote, copied from Python's urllib module
    # because urllib unconditionally imports socket, which imports ssl.

    always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                   'abcdefghijklmnopqrstuvwxyz'
                   '0123456789' '_.-')
    _safe_map = {}
    for i, c in zip(range(256), ''.join(map(chr, range(256)))):
        _safe_map[c] = c if (
            i < 128 and c in always_safe) else '%{0:02X}'.format(i)
    _safe_quoters = {}

    def quote_from_bytes(s, safe='/'):
        """quote('abc def') -> 'abc%20def'

        Each part of a URL, e.g. the path info, the query, etc., has a
        different set of reserved characters that must be quoted.

        RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
        the following reserved characters.

        reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
                      "$" | ","

        Each of these characters is reserved in some component of a URL,
        but not necessarily in all of them.

        By default, the quote function is intended for quoting the path
        section of a URL.  Thus, it will not encode '/'.  This character
        is reserved, but in typical usage the quote function is being
        called on a path where the existing slash characters are used as
        reserved characters.
        """
        if s is None:
            raise TypeError('None object cannot be quoted')

        cachekey = (safe, always_safe)
        try:
            (quoter, safe) = _safe_quoters[cachekey]
        except KeyError:
            safe_map = _safe_map.copy()
            safe_map.update([(c, c) for c in safe])
            quoter = safe_map.__getitem__
            safe = always_safe + safe
            _safe_quoters[cachekey] = (quoter, safe)
        if not s.rstrip(safe):
            return s
        return ''.join(map(quoter, s))

    quote = quote_from_bytes
    unquote_to_bytes = urlparse.unquote

unquote = urlparse.unquote
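
# Illustrative usage (not part of the original module; either code path above
# mirrors the standard urllib behaviour):
#   quote('abc def')     -> 'abc%20def'
#   unquote('abc%20def') -> 'abc def'
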
def escape(relpath, safe='/~'):
    """Escape relpath to be a valid url."""
    if not isinstance(relpath, str) and sys.version_info[0] == 2:
        # GZ 2019-06-16: Should use _fs_enc instead here really?
        relpath = relpath.encode('utf-8')
    return quote(relpath, safe=safe)
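
# Illustrative usage (not part of the original module):
#   escape(u'foo bar/baz') -> 'foo%20bar/baz'
# '/' survives because it is in the default safe set ('/~').
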
def file_relpath(base, path):
    """Compute just the relative sub-portion of a url"""


def join(base, *args):
    """Create a URL by joining sections.

    This will normalize '..', assuming that paths are absolute
    (it assumes no symlinks in either path)

    If any of *args is an absolute URL, it will be treated correctly.
    Example:
        join('http://foo', 'http://bar') => 'http://bar'
        join('http://foo', 'bar') => 'http://foo/bar'
        join('http://foo', 'bar', '../baz') => 'http://foo/baz'
    """
    if not args:
        return base

    scheme_end, path_start = _find_scheme_and_separator(base)
    if scheme_end is None and path_start is None:
        path_start = 0
    elif path_start is None:
        path_start = len(base)
    path = base[path_start:]
    for arg in args:
        arg_scheme_end, arg_path_start = _find_scheme_and_separator(arg)
        if arg_scheme_end is None and arg_path_start is None:
            arg_path_start = 0
        elif arg_path_start is None:
            arg_path_start = len(arg)
        if arg_scheme_end is not None:
            base = arg
            path = arg[arg_path_start:]
            scheme_end = arg_scheme_end
            path_start = arg_path_start
        else:
            path = joinpath(path, arg)
    return base[:path_start] + path

def joinpath(base, *args):
    """Join URL path segments to a URL path segment."""


def _posix_local_path_to_url(path):
    """Convert a local path like ./foo into a URL like file:///path/to/foo"""
    # importing directly from posixpath allows us to test this
    # on non-posix platforms
    return 'file://' + escape(osutils._posix_abspath(path))

def _win32_local_path_from_url(url):
    """Convert a url like file:///C:/path/to/foo into C:/path/to/foo"""
    if not url.startswith('file://'):
        raise InvalidURL(url, 'local urls must start with file:///, '
                         'UNC path urls must start with file://')
    url = split_segment_parameters_raw(url)[0]
    # We strip off all 3 slashes
    win32_url = url[len('file:'):]
    # check for UNC path: //HOST/path
    if not win32_url.startswith('///'):
        if (win32_url[2] == '/'
                or win32_url[3] in '|:'):
            raise InvalidURL(url, 'Win32 UNC path urls'
                             ' have form file://HOST/path')
        return unescape(win32_url)

    # allow empty paths so we can serve all roots
def normalize_url(url):
    """Make sure that a path string is in fully normalized URL form.

    :param url: Either a hybrid URL or a local path
    :return: A normalized URL which only includes 7-bit ASCII characters.
    """
    scheme_end, path_start = _find_scheme_and_separator(url)
    if scheme_end is None:
        return local_path_to_url(url)
    prefix = url[:path_start]
    path = url[path_start:]
    if not isinstance(url, text_type):
        for c in url:
            if c not in _url_safe_characters:
                raise InvalidURL(url, 'URLs can only contain specific'
                                 ' safe characters (not %r)' % c)
        path = _url_hex_escapes_re.sub(_unescape_safe_chars, path)
        return str(prefix + ''.join(path))

    # We have a unicode (hybrid) url
    path_chars = list(path)
    for i in range(len(path_chars)):
        if path_chars[i] not in _url_safe_characters:
            path_chars[i] = ''.join(
                ['%%%02X' % c for c in bytearray(path_chars[i].encode('utf-8'))])
    path = ''.join(path_chars)
    path = _url_hex_escapes_re.sub(_unescape_safe_chars, path)
    return str(prefix + path)

def relative_url(base, other):
    """Return a path to other from base."""


def split(url, exclude_trailing_slash=True):
    """Split a URL into its parent directory and a child directory."""

    return url_base + head, tail

def split_segment_parameters_raw(url):
    """Split the subsegment of the last segment of a URL.

    :param url: A relative or absolute URL
    :return: (url, subsegments)
    """
    # GZ 2011-11-18: Dodgy removing the terminal slash like this, function
    #                operates on urls not url+segments, and Transport classes
    #                should not be blindly adding slashes in the first place.
    lurl = strip_trailing_slash(url)
    # Segments begin at first comma after last forward slash, if one exists
    segment_start = lurl.find(",", lurl.rfind("/") + 1)
    if segment_start == -1:
        return (url, [])
    return (lurl[:segment_start],
            [str(s) for s in lurl[segment_start + 1:].split(",")])
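
# Illustrative usage (not part of the original module):
#   split_segment_parameters_raw("/srv/repo,branch=trunk")
#       -> ("/srv/repo", ["branch=trunk"])
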
def split_segment_parameters(url):
    """Split the segment parameters of the last segment of a URL.

    :param url: A relative or absolute URL
    :return: (url, segment_parameters)
    """
    (base_url, subsegments) = split_segment_parameters_raw(url)
    parameters = {}
    for subsegment in subsegments:
        try:
            (key, value) = subsegment.split("=", 1)
        except ValueError:
            raise InvalidURL(url, "missing = in subsegment")
        if not isinstance(key, str):
            raise TypeError(key)
        if not isinstance(value, str):
            raise TypeError(value)
        parameters[key] = value
    return (base_url, parameters)

def join_segment_parameters_raw(base, *subsegments):
    """Create a new URL by adding subsegments to an existing one.

    This adds the specified subsegments to the last path in the specified
    base URL. The subsegments should be bytestrings.

    :note: You probably want to use join_segment_parameters instead.
    """
    if not subsegments:
        return base
    for subsegment in subsegments:
        if not isinstance(subsegment, str):
            raise TypeError("Subsegment %r is not a bytestring" % subsegment)
        if "," in subsegment:
            raise InvalidURLJoin(", exists in subsegments",
                                 base, subsegments)
    return ",".join((base,) + subsegments)

def join_segment_parameters(url, parameters):
    """Create a new URL by adding segment parameters to an existing one.

    The parameters of the last segment in the URL will be updated; if a
    parameter with the same key already exists it will be overwritten.

    :param url: A URL, as string
    :param parameters: Dictionary of parameters, keys and values as bytestrings
    """
    (base, existing_parameters) = split_segment_parameters(url)
    new_parameters = {}
    new_parameters.update(existing_parameters)
    for key, value in parameters.items():
        if not isinstance(key, str):
            raise TypeError("parameter key %r is not a str" % key)
        if not isinstance(value, str):
            raise TypeError("parameter value %r for %r is not a str" %
                            (value, key))
        if "=" in key:
            raise InvalidURLJoin("= exists in parameter key", url,
                                 parameters)
        new_parameters[key] = value
    return join_segment_parameters_raw(
        base, *["%s=%s" % item for item in sorted(new_parameters.items())])
def _win32_strip_local_trailing_slash(url):
    """Strip slashes after the drive letter"""
    if len(url) > WIN32_MIN_ABS_FILEURL_LENGTH:
        return url[:-1]
    else:
        return url

def unescape(url):
    """Unescape relpath from url format.

    This returns a Unicode path from a URL
    """
    # jam 20060427 URLs are supposed to be ASCII only strings
    #       If they are passed in as unicode, unquote
    #       will return a UNICODE string, which actually contains
    #       utf-8 bytes. So we have to ensure that they are
    #       plain ASCII strings, or the final .decode will
    #       try to encode the UNICODE => ASCII, and then decode
    #       it into utf-8.
    if PY3:
        if isinstance(url, text_type):
            try:
                url.encode("ascii")
            except UnicodeError as e:
                raise InvalidURL(
                    url, 'URL was not a plain ASCII url: %s' % (e,))
        return urlparse.unquote(url)
    else:
        if isinstance(url, text_type):
            try:
                url = url.encode("ascii")
            except UnicodeError as e:
                raise InvalidURL(
                    url, 'URL was not a plain ASCII url: %s' % (e,))

        unquoted = unquote(url)
        try:
            unicode_path = unquoted.decode('utf-8')
        except UnicodeError as e:
            raise InvalidURL(
                url, 'Unable to encode the URL as utf-8: %s' % (e,))
        return unicode_path

# These are characters that if escaped, should stay that way
_no_decode_chars = ';/?:@&=+$,#'
_no_decode_ords = [ord(c) for c in _no_decode_chars]
_no_decode_hex = (['%02x' % o for o in _no_decode_ords]
                  + ['%02X' % o for o in _no_decode_ords])
_hex_display_map = dict(([('%02x' % o, int2byte(o)) for o in range(256)]
                         + [('%02X' % o, int2byte(o)) for o in range(256)]))
# These entries get mapped to themselves
_hex_display_map.update((hex, b'%' + hex.encode('ascii'))
                        for hex in _no_decode_hex)

# These characters shouldn't be percent-encoded, and it's always safe to
# unencode them if they are.
_url_dont_escape_characters = set(
    "abcdefghijklmnopqrstuvwxyz"  # Lowercase alpha
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"  # Uppercase alpha
    "0123456789"  # Numbers
    "-._~"  # Unreserved characters
    )

# These characters should not be escaped
_url_safe_characters = set(
    "abcdefghijklmnopqrstuvwxyz"  # Lowercase alpha
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"  # Uppercase alpha
    "0123456789"  # Numbers
    "_.-!~*'()"  # Unreserved characters
    "/;?:@&=+$,"  # Reserved characters
    "%#"  # Extra reserved characters
    )

def _unescape_segment_for_display(segment, encoding):
    """Unescape a segment for display.

    Helper for unescape_for_display

    :param url: A 7-bit ASCII URL
    :param encoding: The final output encoding

    :return: A unicode string which can be safely encoded into the
         specified encoding.
    """
    escaped_chunks = segment.split('%')
    escaped_chunks[0] = escaped_chunks[0].encode('utf-8')
    for j in range(1, len(escaped_chunks)):
        item = escaped_chunks[j]
        try:
            escaped_chunks[j] = _hex_display_map[item[:2]]
        except KeyError:
            # Put back the percent symbol
            escaped_chunks[j] = b'%' + \
                (item[:2].encode('utf-8') if PY3 else item[:2])
        except UnicodeDecodeError:
            escaped_chunks[j] = unichr(int(item[:2], 16)).encode('utf-8')
        escaped_chunks[j] += (item[2:].encode('utf-8') if PY3 else item[2:])
    unescaped = b''.join(escaped_chunks)
    try:
        decoded = unescaped.decode('utf-8')
    except UnicodeDecodeError:
        # If this path segment cannot be properly utf-8 decoded
        # after doing unescaping we will just leave it alone
        return segment
    try:
        decoded.encode(encoding)
    except UnicodeEncodeError:
        # If this chunk cannot be encoded in the local
        # encoding, then we should leave it alone
        return segment
    # Otherwise take the url decoded one
    return decoded

def unescape_for_display(url, encoding):
    """Decode what you can for a URL, so that we get a nice looking path."""
    # Split into sections to try to decode utf-8
    res = url.split('/')
    for i in range(1, len(res)):
        res[i] = _unescape_segment_for_display(res[i], encoding)
    return u'/'.join(res)
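
# Illustrative usage (not part of the original module): escaped UTF-8 in a
# path segment is decoded when the target encoding can represent it, e.g.
#   unescape_for_display(u'http://host/r%C3%A9pertoire', 'utf-8')
#       -> u'http://host/r\xe9pertoire'
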
def determine_relative_path(from_path, to_path):
    """Determine a relative path from from_path to to_path."""

    return osutils.pathjoin(*segments)

class URL(object):
    """Parsed URL."""

    def __init__(self, scheme, quoted_user, quoted_password, quoted_host,
                 port, quoted_path):
        self.scheme = scheme
        self.quoted_host = quoted_host
        self.host = unquote(self.quoted_host)
        self.quoted_user = quoted_user
        if self.quoted_user is not None:
            self.user = unquote(self.quoted_user)
        else:
            self.user = None
        self.quoted_password = quoted_password
        if self.quoted_password is not None:
            self.password = unquote(self.quoted_password)
        else:
            self.password = None
        self.port = port
        self.quoted_path = _url_hex_escapes_re.sub(
            _unescape_safe_chars, quoted_path)
        self.path = unquote(self.quoted_path)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.scheme == other.scheme and
                self.host == other.host and
                self.user == other.user and
                self.password == other.password and
                self.path == other.path)

    def __repr__(self):
        return "<%s(%r, %r, %r, %r, %r, %r)>" % (
            self.__class__.__name__,
            self.scheme, self.quoted_user, self.quoted_password,
            self.quoted_host, self.port, self.quoted_path)

    @classmethod
    def from_string(cls, url):
        """Create a URL object from a string.

        :param url: URL as bytestring
        """
        # GZ 2017-06-09: Actually validate ascii-ness
        # pad.lv/1696545: For the moment, accept both native strings and
        # unicode.
        if isinstance(url, str):
            pass
        elif isinstance(url, text_type):
            try:
                url = url.encode()
            except UnicodeEncodeError:
                raise InvalidURL(url)
        else:
            raise InvalidURL(url)
        (scheme, netloc, path, params,
         query, fragment) = urlparse.urlparse(url, allow_fragments=False)
        user = password = host = port = None
        if '@' in netloc:
            user, host = netloc.rsplit('@', 1)
            if ':' in user:
                user, password = user.split(':', 1)
        else:
            host = netloc

        if ':' in host and not (host[0] == '[' and host[-1] == ']'):
            host, port = host.rsplit(':', 1)
            try:
                port = int(port)
            except ValueError:
                raise InvalidURL('invalid port number %s in url:\n%s' %
                                 (port, url))
        if host != "" and host[0] == '[' and host[-1] == ']':  # IPv6
            host = host[1:-1]

        return cls(scheme, user, password, host, port, path)

    def __str__(self):
        netloc = self.quoted_host
        if ":" in netloc:
            netloc = "[%s]" % netloc
        if self.quoted_user is not None:
            # Note that we don't put the password back even if we
            # have one so that it doesn't get accidentally
            # exposed.
            netloc = '%s@%s' % (self.quoted_user, netloc)
        if self.port is not None:
            netloc = '%s:%d' % (netloc, self.port)
        return urlparse.urlunparse(
            (self.scheme, netloc, self.quoted_path, None, None, None))
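
    # Illustrative usage (not part of the original module):
    #   u = URL.from_string('http://user@example.com:8080/path')
    #   (u.scheme, u.user, u.host, u.port, u.path)
    #       -> ('http', 'user', 'example.com', 8080, '/path')
    #   str(u) -> 'http://user@example.com:8080/path'
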
    @staticmethod
    def _combine_paths(base_path, relpath):
        """Transform a Transport-relative path to a remote absolute path.

        This does not handle substitution of ~ but does handle '..' and '.'
        to remove path segments.

        Examples::

            t._combine_paths('/home/sarah', 'project/foo')
                => '/home/sarah/project/foo'
            t._combine_paths('/home/sarah', '../../etc')
                => '/etc'
            t._combine_paths('/home/sarah', '/etc')
                => '/etc'

        :param base_path: base path
        :param relpath: relative url string for relative part of remote path.
        :return: urlencoded string for final path.
        """
        # pad.lv/1696545: For the moment, accept both native strings and
        # unicode.
        if isinstance(relpath, str):
            pass
        elif isinstance(relpath, text_type):
            try:
                relpath = relpath.encode()
            except UnicodeEncodeError:
                raise InvalidURL(relpath)
        else:
            raise InvalidURL(relpath)
        relpath = _url_hex_escapes_re.sub(_unescape_safe_chars, relpath)
        if relpath.startswith('/'):
            base_parts = []
        else:
            base_parts = base_path.split('/')
            if len(base_parts) > 0 and base_parts[-1] == '':
                base_parts = base_parts[:-1]
        for p in relpath.split('/'):
            if p == '..':
                if len(base_parts) == 0:
                    # In most filesystems, a request for the parent
                    # of root, just returns root.
                    continue
                base_parts.pop()
            elif p == '.':
                continue
            elif p != '':
                base_parts.append(p)
        path = '/'.join(base_parts)
        if not path.startswith('/'):
            path = '/' + path
        return path

    def clone(self, offset=None):
        """Return a new URL for a path relative to this URL.

        :param offset: A relative path, already urlencoded
        :return: `URL` instance
        """
        if offset is not None:
            relative = unescape(offset)
            if sys.version_info[0] == 2:
                relative = relative.encode('utf-8')
            path = self._combine_paths(self.path, relative)
            path = quote(path, safe="/~")
        else:
            path = self.quoted_path
        return self.__class__(self.scheme, self.quoted_user,
                              self.quoted_password, self.quoted_host,
                              self.port, path)


def parse_url(url):
    """Extract the server address, the credentials and the path from the url.

    :param url: a quoted url
    :return: (scheme, user, password, host, port, path) tuple, all fields
        are unquoted.
    """
    parsed_url = URL.from_string(url)
    return (parsed_url.scheme, parsed_url.user, parsed_url.password,
            parsed_url.host, parsed_url.port, parsed_url.path)
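
# Illustrative usage (not part of the original module):
#   parse_url('http://user:secret@example.com:8080/path')
#       -> ('http', 'user', 'secret', 'example.com', 8080, '/path')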