# Copyright (C) 2005, 2006, 2007, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
from stat import (S_ISREG, S_ISDIR, S_ISLNK, ST_MODE, ST_SIZE,
21
S_ISCHR, S_ISBLK, S_ISFIFO, S_ISSOCK)
26
from bzrlib.lazy_import import lazy_import
27
lazy_import(globals(), """
29
from datetime import datetime
31
from ntpath import (abspath as _nt_abspath,
33
normpath as _nt_normpath,
34
realpath as _nt_realpath,
35
splitdrive as _nt_splitdrive,
44
from tempfile import (
56
# sha and md5 modules are deprecated in python2.6 but hashlib is available as
58
if sys.version_info < (2, 5):
59
import md5 as _mod_md5
61
import sha as _mod_sha
71
from bzrlib import symbol_versioning
74
# On win32, O_BINARY is used to indicate the file should
75
# be opened in binary mode, rather than text mode.
76
# On other platforms, O_BINARY doesn't exist, because
77
# they always open in binary mode, so it is okay to
78
# OR with 0 on those platforms
79
O_BINARY = getattr(os, 'O_BINARY', 0)
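# Illustrative sketch (not part of the original module): because O_BINARY
# falls back to 0 on non-Windows platforms, it can always be OR'ed into the
# flags passed to os.open without a platform check. The file name below is
# just an example value.
def _example_open_binary(path='example.bin'):
    fd = os.open(path, os.O_RDONLY | O_BINARY)
    try:
        return os.read(fd, 4096)
    finally:
        os.close(fd)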
82
def get_unicode_argv():
84
user_encoding = get_user_encoding()
85
return [a.decode(user_encoding) for a in sys.argv[1:]]
86
except UnicodeDecodeError:
87
raise errors.BzrError(("Parameter '%r' is unsupported by the current "
91
def make_readonly(filename):
92
"""Make a filename read-only."""
93
mod = os.lstat(filename).st_mode
94
if not stat.S_ISLNK(mod):
96
os.chmod(filename, mod)
99
def make_writable(filename):
100
mod = os.lstat(filename).st_mode
101
if not stat.S_ISLNK(mod):
103
os.chmod(filename, mod)
106
def minimum_path_selection(paths):
107
"""Return the smallset subset of paths which are outside paths.
109
:param paths: A container (and hence not None) of paths.
110
:return: A set of paths sufficient to include everything in paths via
111
is_inside, drawn from the paths parameter.
117
return path.split('/')
118
sorted_paths = sorted(list(paths), key=sort_key)
120
search_paths = [sorted_paths[0]]
121
for path in sorted_paths[1:]:
122
if not is_inside(search_paths[-1], path):
123
# This path is unique, add it
124
search_paths.append(path)
126
return set(search_paths)
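# Hedged usage sketch (not part of the original module): given overlapping
# paths, minimum_path_selection keeps only the outermost ones, because the
# nested entries are already reachable via is_inside.
def _example_minimum_path_selection():
    paths = ['a', 'a/b', 'a/b/c', 'd/e']
    # Expected result under the semantics described above: set(['a', 'd/e'])
    return minimum_path_selection(paths)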
133
"""Return a quoted filename filename
135
This previously used backslash quoting, but that works poorly on
137
# TODO: I'm not really sure this is the best format either.
139
if _QUOTE_RE is None:
140
_QUOTE_RE = re.compile(r'([^a-zA-Z0-9.,:/\\_~-])')
142
if _QUOTE_RE.search(f):
148
_directory_kind = 'directory'
151
"""Return the current umask"""
152
# Assume that people aren't messing with the umask while running
153
# XXX: This is not thread safe, but there is no way to get the
154
# umask without setting it
162
_directory_kind: "/",
164
'tree-reference': '+',
168
def kind_marker(kind):
170
return _kind_marker_map[kind]
172
raise errors.BzrError('invalid file kind %r' % kind)
175
lexists = getattr(os.path, 'lexists', None)
179
stat = getattr(os, 'lstat', os.stat)
183
if e.errno == errno.ENOENT:
186
raise errors.BzrError("lstat/stat of (%r): %r" % (f, e))
189
def fancy_rename(old, new, rename_func, unlink_func):
190
"""A fancy rename, when you don't have atomic rename.
192
:param old: The old path, to rename from
193
:param new: The new path, to rename to
194
:param rename_func: The potentially non-atomic rename function
195
:param unlink_func: A way to delete the target file if the full rename succeeds
198
# sftp rename doesn't allow overwriting, so play tricks:
199
base = os.path.basename(new)
200
dirname = os.path.dirname(new)
201
tmp_name = u'tmp.%s.%.9f.%d.%s' % (base, time.time(), os.getpid(), rand_chars(10))
202
tmp_name = pathjoin(dirname, tmp_name)
204
# Rename the file out of the way, but keep track if it didn't exist
205
# We don't want to grab just any exception
206
# something like EACCES should prevent us from continuing
207
# The downside is that the rename_func has to throw an exception
208
# with an errno = ENOENT, or NoSuchFile
211
rename_func(new, tmp_name)
212
except (errors.NoSuchFile,), e:
215
# RBC 20060103 abstraction leakage: the paramiko SFTP clients rename
216
# function raises an IOError with errno is None when a rename fails.
217
# This then gets caught here.
218
if e.errno not in (None, errno.ENOENT, errno.ENOTDIR):
221
if (getattr(e, 'errno', None) is None
222
or e.errno not in (errno.ENOENT, errno.ENOTDIR)):
230
# This may throw an exception, in which case success will
232
rename_func(old, new)
234
except (IOError, OSError), e:
235
# source and target may be aliases of each other (e.g. on a
236
# case-insensitive filesystem), so we may have accidentally renamed
237
# source when we tried to rename target
238
if not (file_existed and e.errno in (None, errno.ENOENT)):
242
# If the file used to exist, rename it back into place
243
# otherwise just delete it from the tmp location
245
unlink_func(tmp_name)
247
rename_func(tmp_name, new)
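# Hedged usage sketch (not part of the original module): on transports whose
# rename refuses to overwrite an existing target (e.g. SFTP), fancy_rename is
# handed the raw rename/unlink callables and emulates an atomic replace by
# moving the target aside first. The local os functions and file names here
# are purely illustrative.
def _example_fancy_rename(old='old.txt', new='new.txt'):
    fancy_rename(old, new, rename_func=os.rename, unlink_func=os.unlink)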
250
# In Python 2.4.2 and older, os.path.abspath and os.path.realpath
251
# choke on a Unicode string containing a relative path if
252
# os.getcwd() returns a non-sys.getdefaultencoding()-encoded
254
_fs_enc = sys.getfilesystemencoding() or 'utf-8'
255
def _posix_abspath(path):
256
# jam 20060426 rather than encoding to fsencoding
257
# copy posixpath.abspath, but use os.getcwdu instead
258
if not posixpath.isabs(path):
259
path = posixpath.join(getcwd(), path)
260
return posixpath.normpath(path)
263
def _posix_realpath(path):
264
return posixpath.realpath(path.encode(_fs_enc)).decode(_fs_enc)
267
def _win32_fixdrive(path):
268
"""Force drive letters to be consistent.
270
win32 is inconsistent whether it returns lower or upper case,
and even if it was consistent the user might type the other,
so we force it to uppercase.
running python.exe under cmd.exe returns capital C:\\
running win32 python inside a cygwin shell returns lowercase c:\\
276
drive, path = _nt_splitdrive(path)
277
return drive.upper() + path
280
def _win32_abspath(path):
281
# Real _nt_abspath doesn't have a problem with a unicode cwd
282
return _win32_fixdrive(_nt_abspath(unicode(path)).replace('\\', '/'))
285
def _win98_abspath(path):
286
"""Return the absolute version of a path.
287
Windows 98 safe implementation (python reimplementation
288
of Win32 API function GetFullPathNameW)
293
# \\HOST\path => //HOST/path
294
# //HOST/path => //HOST/path
295
# path => C:/cwd/path
298
# check for absolute path
299
drive = _nt_splitdrive(path)[0]
300
if drive == '' and path[:2] not in ('//', '\\\\'):
302
# we cannot simply os.path.join cwd and path
303
# because os.path.join('C:', '/path') produces '/path'
304
# and this is incorrect
305
if path[:1] in ('/','\\'):
306
cwd = _nt_splitdrive(cwd)[0]
308
path = cwd + '\\' + path
309
return _win32_fixdrive(_nt_normpath(path).replace('\\', '/'))
312
def _win32_realpath(path):
313
# Real _nt_realpath doesn't have a problem with a unicode cwd
314
return _win32_fixdrive(_nt_realpath(unicode(path)).replace('\\', '/'))
317
def _win32_pathjoin(*args):
318
return _nt_join(*args).replace('\\', '/')
321
def _win32_normpath(path):
322
return _win32_fixdrive(_nt_normpath(unicode(path)).replace('\\', '/'))
326
return _win32_fixdrive(os.getcwdu().replace('\\', '/'))
329
def _win32_mkdtemp(*args, **kwargs):
330
return _win32_fixdrive(tempfile.mkdtemp(*args, **kwargs).replace('\\', '/'))
333
def _win32_rename(old, new):
334
"""We expect to be able to atomically replace 'new' with old.
336
On win32, if new exists, it must be moved out of the way first,
340
fancy_rename(old, new, rename_func=os.rename, unlink_func=os.unlink)
342
if e.errno in (errno.EPERM, errno.EACCES, errno.EBUSY, errno.EINVAL):
343
# If we try to rename a non-existent file onto cwd, we get
344
# EPERM or EACCES instead of ENOENT, this will raise ENOENT
345
# if the old path doesn't exist, sometimes we get EACCES
346
# On Linux, we seem to get EBUSY, on Mac we get EINVAL
352
return unicodedata.normalize('NFC', os.getcwdu())
355
# Default is to just use the python builtins, but these can be rebound on
356
# particular platforms.
357
abspath = _posix_abspath
358
realpath = _posix_realpath
359
pathjoin = os.path.join
360
normpath = os.path.normpath
363
dirname = os.path.dirname
364
basename = os.path.basename
365
split = os.path.split
366
splitext = os.path.splitext
367
# These were already imported into local scope
368
# mkdtemp = tempfile.mkdtemp
369
# rmtree = shutil.rmtree
371
MIN_ABS_PATHLENGTH = 1
374
if sys.platform == 'win32':
375
if win32utils.winver == 'Windows 98':
376
abspath = _win98_abspath
378
abspath = _win32_abspath
379
realpath = _win32_realpath
380
pathjoin = _win32_pathjoin
381
normpath = _win32_normpath
382
getcwd = _win32_getcwd
383
mkdtemp = _win32_mkdtemp
384
rename = _win32_rename
386
MIN_ABS_PATHLENGTH = 3
388
def _win32_delete_readonly(function, path, excinfo):
389
"""Error handler for shutil.rmtree function [for win32]
390
Helps to remove files and dirs marked as read-only.
392
exception = excinfo[1]
393
if function in (os.remove, os.rmdir) \
394
and isinstance(exception, OSError) \
395
and exception.errno == errno.EACCES:
401
def rmtree(path, ignore_errors=False, onerror=_win32_delete_readonly):
402
"""Replacer for shutil.rmtree: could remove readonly dirs/files"""
403
return shutil.rmtree(path, ignore_errors, onerror)
405
f = win32utils.get_unicode_argv # special function or None
409
elif sys.platform == 'darwin':
413
def get_terminal_encoding():
414
"""Find the best encoding for printing to the screen.
416
This attempts to check both sys.stdout and sys.stdin to see
417
what encoding they are in, and if that fails it falls back to
418
osutils.get_user_encoding().
419
The problem is that on Windows, locale.getpreferredencoding()
420
is not the same encoding as that used by the console:
421
http://mail.python.org/pipermail/python-list/2003-May/162357.html
423
On my standard US Windows XP, the preferred encoding is
424
cp1252, but the console is cp437
426
from bzrlib.trace import mutter
427
output_encoding = getattr(sys.stdout, 'encoding', None)
428
if not output_encoding:
429
input_encoding = getattr(sys.stdin, 'encoding', None)
430
if not input_encoding:
431
output_encoding = get_user_encoding()
432
mutter('encoding stdout as osutils.get_user_encoding() %r',
435
output_encoding = input_encoding
436
mutter('encoding stdout as sys.stdin encoding %r', output_encoding)
438
mutter('encoding stdout as sys.stdout encoding %r', output_encoding)
439
if output_encoding == 'cp0':
440
# invalid encoding (cp0 means 'no codepage' on Windows)
441
output_encoding = get_user_encoding()
442
mutter('cp0 is invalid encoding.'
443
' encoding stdout as osutils.get_user_encoding() %r',
447
codecs.lookup(output_encoding)
449
sys.stderr.write('bzr: warning:'
450
' unknown terminal encoding %s.\n'
451
' Using encoding %s instead.\n'
452
% (output_encoding, get_user_encoding())
454
output_encoding = get_user_encoding()
456
return output_encoding
459
def normalizepath(f):
460
if getattr(os.path, 'realpath', None) is not None:
464
p, e = os.path.split(f)
465
if e == "" or e == "." or e == "..":
468
return pathjoin(F(p), e)
472
"""True if f is an accessible directory."""
474
return S_ISDIR(os.lstat(f)[ST_MODE])
480
"""True if f is a regular file."""
482
return S_ISREG(os.lstat(f)[ST_MODE])
487
"""True if f is a symlink."""
489
return S_ISLNK(os.lstat(f)[ST_MODE])
493
def is_inside(dir, fname):
494
"""True if fname is inside dir.
496
The parameters should typically be passed to osutils.normpath first, so
497
that . and .. and repeated slashes are eliminated, and the separators
498
are canonical for the platform.
500
The empty string as a dir name is taken as top-of-tree and matches
503
# XXX: Most callers of this can actually do something smarter by
504
# looking at the inventory
514
return fname.startswith(dir)
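# Hedged illustration (not part of the original module) of the containment
# semantics described above: paths are expected to be normalized first, and
# the empty string acts as top-of-tree.
def _example_is_inside():
    checks = [
        is_inside('src', 'src/foo.c'),        # True: direct child
        is_inside('src', 'srccontrol'),       # False: shares a prefix only
        is_inside('', 'anything/at/all'),     # True: '' matches everything
        is_inside('src/foo.c', 'src/foo.c'),  # True: a path is inside itself
    ]
    return checks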
517
def is_inside_any(dir_list, fname):
518
"""True if fname is inside any of given dirs."""
519
for dirname in dir_list:
520
if is_inside(dirname, fname):
525
def is_inside_or_parent_of_any(dir_list, fname):
526
"""True if fname is a child or a parent of any of the given files."""
527
for dirname in dir_list:
528
if is_inside(dirname, fname) or is_inside(fname, dirname):
533
def pumpfile(from_file, to_file, read_length=-1, buff_size=32768,
534
report_activity=None, direction='read'):
535
"""Copy contents of one file to another.
537
The read_length can either be -1 to read to end-of-file (EOF) or
538
it can specify the maximum number of bytes to read.
540
The buff_size represents the maximum size for each read operation
541
performed on from_file.
543
:param report_activity: Call this as bytes are read, see
544
Transport._report_activity
545
:param direction: Will be passed to report_activity
547
:return: The number of bytes copied.
551
# read specified number of bytes
553
while read_length > 0:
554
num_bytes_to_read = min(read_length, buff_size)
556
block = from_file.read(num_bytes_to_read)
560
if report_activity is not None:
561
report_activity(len(block), direction)
564
actual_bytes_read = len(block)
565
read_length -= actual_bytes_read
566
length += actual_bytes_read
570
block = from_file.read(buff_size)
574
if report_activity is not None:
575
report_activity(len(block), direction)
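# Hedged usage sketch (not part of the original module): pumpfile copies
# between any two file-like objects, so in-memory StringIO objects are enough
# to demonstrate the read_length and buff_size parameters.
def _example_pumpfile():
    from cStringIO import StringIO
    src = StringIO('x' * 100000)
    dst = StringIO()
    copied = pumpfile(src, dst, read_length=-1, buff_size=32768)
    # Both values are expected to be 100000.
    return copied, len(dst.getvalue())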
581
def pump_string_file(bytes, file_handle, segment_size=None):
582
"""Write bytes to file_handle in many smaller writes.
584
:param bytes: The string to write.
585
:param file_handle: The file to write to.
587
# Write data in chunks rather than all at once, because very large
588
# writes fail on some platforms (e.g. Windows with SMB mounted
591
segment_size = 5242880 # 5MB
592
segments = range(len(bytes) / segment_size + 1)
593
write = file_handle.write
594
for segment_index in segments:
595
segment = buffer(bytes, segment_index * segment_size, segment_size)
599
def file_iterator(input_file, readsize=32768):
601
b = input_file.read(readsize)
608
"""Calculate the hexdigest of an open file.
610
The file cursor should already be at the start.
622
def size_sha_file(f):
623
"""Calculate the size and hexdigest of an open file.
625
The file cursor should already be at the start and
626
the caller is responsible for closing the file afterwards.
637
return size, s.hexdigest()
640
def sha_file_by_name(fname):
641
"""Calculate the SHA1 of a file by reading the full text"""
643
f = os.open(fname, os.O_RDONLY | O_BINARY)
646
b = os.read(f, 1<<16)
654
def sha_strings(strings, _factory=sha):
655
"""Return the sha-1 of concatenation of strings"""
657
map(s.update, strings)
661
def sha_string(f, _factory=sha):
662
return _factory(f).hexdigest()
665
def fingerprint_file(f):
667
return {'size': len(b),
668
'sha1': sha(b).hexdigest()}
671
def compare_files(a, b):
672
"""Returns true if equal in contents"""
683
def local_time_offset(t=None):
684
"""Return offset of local zone from GMT, either at present or at time t."""
687
offset = datetime.fromtimestamp(t) - datetime.utcfromtimestamp(t)
688
return offset.days * 86400 + offset.seconds
690
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
692
def format_date(t, offset=0, timezone='original', date_fmt=None,
694
"""Return a formatted date string.
696
:param t: Seconds since the epoch.
697
:param offset: Timezone offset in seconds east of utc.
698
:param timezone: How to display the time: 'utc', 'original' for the
699
timezone specified by offset, or 'local' for the process's current
701
:param date_fmt: strftime format.
702
:param show_offset: Whether to append the timezone.
704
(date_fmt, tt, offset_str) = \
705
_format_date(t, offset, timezone, date_fmt, show_offset)
706
date_fmt = date_fmt.replace('%a', weekdays[tt[6]])
707
date_str = time.strftime(date_fmt, tt)
708
return date_str + offset_str
710
def format_local_date(t, offset=0, timezone='original', date_fmt=None,
712
"""Return an unicode date string formatted according to the current locale.
714
:param t: Seconds since the epoch.
715
:param offset: Timezone offset in seconds east of utc.
716
:param timezone: How to display the time: 'utc', 'original' for the
717
timezone specified by offset, or 'local' for the process's current
719
:param date_fmt: strftime format.
720
:param show_offset: Whether to append the timezone.
722
(date_fmt, tt, offset_str) = \
723
_format_date(t, offset, timezone, date_fmt, show_offset)
724
date_str = time.strftime(date_fmt, tt)
725
if not isinstance(date_str, unicode):
726
date_str = date_str.decode(get_user_encoding(), 'replace')
727
return date_str + offset_str
729
def _format_date(t, offset, timezone, date_fmt, show_offset):
730
if timezone == 'utc':
733
elif timezone == 'original':
736
tt = time.gmtime(t + offset)
737
elif timezone == 'local':
738
tt = time.localtime(t)
739
offset = local_time_offset(t)
741
raise errors.UnsupportedTimezoneFormat(timezone)
743
date_fmt = "%a %Y-%m-%d %H:%M:%S"
745
offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
748
return (date_fmt, tt, offset_str)
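# Worked example (not part of the original module) of the offset formatting
# used above: the hour and minute fields are both derived from the same
# offset in seconds, so +19800s becomes ' +0530' and -18000s becomes ' -0500'
# (Python 2 integer division).
def _example_offset_str(offset=19800):
    return ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)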
751
def compact_date(when):
752
return time.strftime('%Y%m%d%H%M%S', time.gmtime(when))
755
def format_delta(delta):
756
"""Get a nice looking string for a time delta.
758
:param delta: The time difference in seconds, can be positive or negative.
759
positive indicates time in the past, negative indicates time in the
760
future. (usually time.time() - stored_time)
761
:return: String formatted to show approximate resolution
767
direction = 'in the future'
771
if seconds < 90: # print seconds up to 90 seconds
773
return '%d second %s' % (seconds, direction,)
775
return '%d seconds %s' % (seconds, direction)
777
minutes = int(seconds / 60)
778
seconds -= 60 * minutes
783
if minutes < 90: # print minutes, seconds up to 90 minutes
785
return '%d minute, %d second%s %s' % (
786
minutes, seconds, plural_seconds, direction)
788
return '%d minutes, %d second%s %s' % (
789
minutes, seconds, plural_seconds, direction)
791
hours = int(minutes / 60)
792
minutes -= 60 * hours
799
return '%d hour, %d minute%s %s' % (hours, minutes,
800
plural_minutes, direction)
801
return '%d hours, %d minute%s %s' % (hours, minutes,
802
plural_minutes, direction)
805
"""Return size of given open file."""
806
return os.fstat(f.fileno())[ST_SIZE]
809
# Define rand_bytes based on platform.
811
# Python 2.4 and later have os.urandom,
812
# but it doesn't work on some arches
814
rand_bytes = os.urandom
815
except (NotImplementedError, AttributeError):
816
# If python doesn't have os.urandom, or it doesn't work,
817
# then try to first pull random data from /dev/urandom
819
rand_bytes = file('/dev/urandom', 'rb').read
820
# Otherwise, use this hack as a last resort
821
except (IOError, OSError):
822
# not well seeded, but better than nothing
827
s += chr(random.randint(0, 255))
832
ALNUM = '0123456789abcdefghijklmnopqrstuvwxyz'
834
"""Return a random string of num alphanumeric characters
836
The result only contains lowercase chars because it may be used on
837
case-insensitive filesystems.
840
for raw_byte in rand_bytes(num):
841
s += ALNUM[ord(raw_byte) % 36]
845
## TODO: We could later have path objects that remember their list
846
## decomposition (might be too tricksy though.)
849
"""Turn string into list of parts."""
850
# split on either delimiter because people might use either on
852
ps = re.split(r'[\\/]', p)
857
raise errors.BzrError("sorry, %r not allowed in path" % f)
858
elif (f == '.') or (f == ''):
867
if (f == '..') or (f is None) or (f == ''):
868
raise errors.BzrError("sorry, %r not allowed in path" % f)
872
def parent_directories(filename):
873
"""Return the list of parent directories, deepest first.
875
For example, parent_directories("a/b/c") -> ["a/b", "a"].
878
parts = splitpath(dirname(filename))
880
parents.append(joinpath(parts))
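# Hedged usage sketch (not part of the original module), matching the example
# given in the parent_directories docstring above.
def _example_parent_directories():
    # Expected: ['a/b', 'a'], deepest first.
    return parent_directories('a/b/c')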
885
_extension_load_failures = []
888
def failed_to_load_extension(exception):
889
"""Handle failing to load a binary extension.
891
This should be called from the ImportError block guarding the attempt to
892
import the native extension. If this function returns, the pure-Python
893
implementation should be loaded instead::
896
>>> import bzrlib._fictional_extension_pyx
897
>>> except ImportError, e:
898
>>> bzrlib.osutils.failed_to_load_extension(e)
899
>>> import bzrlib._fictional_extension_py
901
# NB: This docstring is just an example, not a doctest, because doctest
902
# currently can't cope with the use of lazy imports in this namespace --
905
# This currently doesn't report the failure at the time it occurs, because
906
# they tend to happen very early in startup when we can't check config
907
# files etc, and also we want to report all failures but not spam the user
909
from bzrlib import trace
910
exception_str = str(exception)
911
if exception_str not in _extension_load_failures:
912
trace.mutter("failed to load compiled extension: %s" % exception_str)
913
_extension_load_failures.append(exception_str)
916
def report_extension_load_failures():
917
if not _extension_load_failures:
919
from bzrlib.config import GlobalConfig
920
if GlobalConfig().get_user_option_as_bool('ignore_missing_extensions'):
922
# the warnings framework should by default show this only once
923
from bzrlib.trace import warning
925
"bzr: warning: some compiled extensions could not be loaded; "
926
"see <https://answers.launchpad.net/bzr/+faq/703>")
927
# we no longer show the specific missing extensions here, because it makes
928
# the message too long and scary - see
929
# https://bugs.launchpad.net/bzr/+bug/430529
933
from bzrlib._chunks_to_lines_pyx import chunks_to_lines
934
except ImportError, e:
935
failed_to_load_extension(e)
936
from bzrlib._chunks_to_lines_py import chunks_to_lines
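# Hedged usage sketch (not part of the original module): whichever
# implementation the fallback above selected, chunks_to_lines regroups
# arbitrary byte chunks into complete lines that keep their newlines.
def _example_chunks_to_lines():
    # Expected: ['first\n', 'second\n', 'third']
    return chunks_to_lines(['fir', 'st\nsec', 'ond\nthird'])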
940
"""Split s into lines, but without removing the newline characters."""
941
# Trivially convert a fulltext into a 'chunked' representation, and let
942
# chunks_to_lines do the heavy lifting.
943
if isinstance(s, str):
944
# chunks_to_lines only supports 8-bit strings
945
return chunks_to_lines([s])
947
return _split_lines(s)
951
"""Split s into lines, but without removing the newline characters.
953
This supports Unicode or plain string objects.
955
lines = s.split('\n')
956
result = [line + '\n' for line in lines[:-1]]
958
result.append(lines[-1])
962
def hardlinks_good():
963
return sys.platform not in ('win32', 'cygwin', 'darwin')
966
def link_or_copy(src, dest):
967
"""Hardlink a file, or copy it if it can't be hardlinked."""
968
if not hardlinks_good():
969
shutil.copyfile(src, dest)
973
except (OSError, IOError), e:
974
if e.errno != errno.EXDEV:
976
shutil.copyfile(src, dest)
979
def delete_any(path):
980
"""Delete a file, symlink or directory.
982
Will delete even if readonly.
985
_delete_file_or_dir(path)
986
except (OSError, IOError), e:
987
if e.errno in (errno.EPERM, errno.EACCES):
988
# make writable and try again
991
except (OSError, IOError):
993
_delete_file_or_dir(path)
998
def _delete_file_or_dir(path):
999
# Look Before You Leap (LBYL) is appropriate here instead of Easier to Ask for
1000
# Forgiveness than Permission (EAFP) because:
1001
# - root can damage a solaris file system by using unlink,
1002
# - unlink raises different exceptions on different OSes (linux: EISDIR, win32:
1003
# EACCES, OSX: EPERM) when invoked on a directory.
1004
if isdir(path): # Takes care of symlinks
1011
if getattr(os, 'symlink', None) is not None:
1017
def has_hardlinks():
1018
if getattr(os, 'link', None) is not None:
1024
def host_os_dereferences_symlinks():
1025
return (has_symlinks()
1026
and sys.platform not in ('cygwin', 'win32'))
1029
def readlink(abspath):
1030
"""Return a string representing the path to which the symbolic link points.
1032
:param abspath: The link absolute unicode path.
1034
This is guaranteed to return the symbolic link in unicode in all python
1037
link = abspath.encode(_fs_enc)
1038
target = os.readlink(link)
1039
target = target.decode(_fs_enc)
1043
def contains_whitespace(s):
1044
"""True if there are any whitespace characters in s."""
1045
# string.whitespace can include '\xa0' in certain locales, because it is
1046
# considered "non-breaking-space" as part of ISO-8859-1. But it
1047
# 1) Isn't a breaking whitespace
1048
# 2) Isn't one of ' \t\r\n' which are characters we sometimes use as
1050
# 3) '\xa0' isn't unicode safe since it is >128.
1052
# This should *not* be a unicode set of characters in case the source
1053
# string is not a Unicode string. We can auto-up-cast the characters since
1054
# they are ascii, but we don't want to auto-up-cast the string in case it
1056
for ch in ' \t\n\r\v\f':
1063
def contains_linebreaks(s):
1064
"""True if there is any vertical whitespace in s."""
1072
def relpath(base, path):
1073
"""Return path relative to base, or raise exception.
1075
The path may be either an absolute path or a path relative to the
1076
current working directory.
1078
os.path.commonprefix (python2.4) has a bad bug: it works just
1079
on string prefixes, assuming that '/u' is a prefix of '/u2'. This
1080
avoids that problem.
1083
if len(base) < MIN_ABS_PATHLENGTH:
1084
# must have space for e.g. a drive letter
1085
raise ValueError('%r is too short to calculate a relative path'
1093
if len(head) <= len(base) and head != base:
1094
raise errors.PathNotChild(rp, base)
1097
head, tail = split(head)
1102
return pathjoin(*reversed(s))
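# Hedged usage sketch (not part of the original module), using POSIX-style
# absolute paths as example values: relpath walks the path upwards with
# split() rather than relying on os.path.commonprefix, so '/u2/foo' is not
# mistaken for a child of '/u'.
def _example_relpath():
    # relpath('/u', '/u2/foo') raises errors.PathNotChild rather than
    # returning a bogus relative path.
    # Expected here: 'b/c'
    return relpath('/srv/repo', '/srv/repo/b/c')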
1107
def _cicp_canonical_relpath(base, path):
1108
"""Return the canonical path relative to base.
1110
Like relpath, but on case-insensitive-case-preserving file-systems, this
1111
will return the relpath as stored on the file-system rather than in the
1112
case specified in the input string, for all existing portions of the path.
1114
This will cause O(N) behaviour if called for every path in a tree; if you
1115
have a number of paths to convert, you should use canonical_relpaths().
1117
# TODO: it should be possible to optimize this for Windows by using the
1118
# win32 API FindFiles function to look for the specified name - but using
1119
# os.listdir() still gives us the correct, platform agnostic semantics in
1122
rel = relpath(base, path)
1123
# '.' will have been turned into ''
1127
abs_base = abspath(base)
1129
_listdir = os.listdir
1131
# use an explicit iterator so we can easily consume the rest on early exit.
1132
bit_iter = iter(rel.split('/'))
1133
for bit in bit_iter:
1136
next_entries = _listdir(current)
1137
except OSError: # enoent, eperm, etc
1138
# We can't find this in the filesystem, so just append the
1140
current = pathjoin(current, bit, *list(bit_iter))
1142
for look in next_entries:
1143
if lbit == look.lower():
1144
current = pathjoin(current, look)
1147
# got to the end, nothing matched, so we just return the
1148
# non-existing bits as they were specified (the filename may be
1149
# the target of a move, for example).
1150
current = pathjoin(current, bit, *list(bit_iter))
1152
return current[len(abs_base):].lstrip('/')
1154
# XXX - TODO - we need better detection/integration of case-insensitive
1155
# file-systems; Linux often sees FAT32 devices (or NFS-mounted OSX
1156
# filesystems), for example, so could probably benefit from the same basic
1157
# support there. For now though, only Windows and OSX get that support, and
1158
# they get it for *all* file-systems!
1159
if sys.platform in ('win32', 'darwin'):
1160
canonical_relpath = _cicp_canonical_relpath
1162
canonical_relpath = relpath
1164
def canonical_relpaths(base, paths):
1165
"""Create an iterable to canonicalize a sequence of relative paths.
1167
The intent is for this implementation to use a cache, vastly speeding
1168
up multiple transformations in the same directory.
1170
# but for now, we haven't optimized...
1171
return [canonical_relpath(base, p) for p in paths]
1173
def safe_unicode(unicode_or_utf8_string):
1174
"""Coerce unicode_or_utf8_string into unicode.
1176
If it is unicode, it is returned.
1177
Otherwise it is decoded from utf-8. If decoding fails, the exception is
1178
wrapped in a BzrBadParameterNotUnicode exception.
1180
if isinstance(unicode_or_utf8_string, unicode):
1181
return unicode_or_utf8_string
1183
return unicode_or_utf8_string.decode('utf8')
1184
except UnicodeDecodeError:
1185
raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
1188
def safe_utf8(unicode_or_utf8_string):
1189
"""Coerce unicode_or_utf8_string to a utf8 string.
1191
If it is a str, it is returned.
1192
If it is Unicode, it is encoded into a utf-8 string.
1194
if isinstance(unicode_or_utf8_string, str):
1195
# TODO: jam 20070209 This is overkill, and probably has an impact on
1196
# performance if we are dealing with lots of apis that want a
1199
# Make sure it is a valid utf-8 string
1200
unicode_or_utf8_string.decode('utf-8')
1201
except UnicodeDecodeError:
1202
raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
1203
return unicode_or_utf8_string
1204
return unicode_or_utf8_string.encode('utf-8')
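# Hedged illustration (not part of the original module): safe_unicode and
# safe_utf8 accept either str or unicode and converge on one type, raising
# BzrBadParameterNotUnicode for byte strings that are not valid UTF-8.
def _example_safe_coercions():
    assert safe_unicode('caf\xc3\xa9') == u'caf\xe9'
    assert safe_utf8(u'caf\xe9') == 'caf\xc3\xa9'
    assert safe_utf8('already-a-str') == 'already-a-str'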
1207
_revision_id_warning = ('Unicode revision ids were deprecated in bzr 0.15.'
1208
' Revision id generators should be creating utf8'
1212
def safe_revision_id(unicode_or_utf8_string, warn=True):
1213
"""Revision ids should now be utf8, but at one point they were unicode.
1215
:param unicode_or_utf8_string: A possibly Unicode revision_id. (can also be
1217
:param warn: Functions that are sanitizing user data can set warn=False
1218
:return: None or a utf8 revision id.
1220
if (unicode_or_utf8_string is None
1221
or unicode_or_utf8_string.__class__ == str):
1222
return unicode_or_utf8_string
1224
symbol_versioning.warn(_revision_id_warning, DeprecationWarning,
1226
return cache_utf8.encode(unicode_or_utf8_string)
1229
_file_id_warning = ('Unicode file ids were deprecated in bzr 0.15. File id'
1230
' generators should be creating utf8 file ids.')
1233
def safe_file_id(unicode_or_utf8_string, warn=True):
1234
"""File ids should now be utf8, but at one point they were unicode.
1236
This is the same as safe_utf8, except it uses the cached encode functions
1237
to save a little bit of performance.
1239
:param unicode_or_utf8_string: A possibly Unicode file_id. (can also be
1241
:param warn: Functions that are sanitizing user data can set warn=False
1242
:return: None or a utf8 file id.
1244
if (unicode_or_utf8_string is None
1245
or unicode_or_utf8_string.__class__ == str):
1246
return unicode_or_utf8_string
1248
symbol_versioning.warn(_file_id_warning, DeprecationWarning,
1250
return cache_utf8.encode(unicode_or_utf8_string)
1253
_platform_normalizes_filenames = False
1254
if sys.platform == 'darwin':
1255
_platform_normalizes_filenames = True
1258
def normalizes_filenames():
1259
"""Return True if this platform normalizes unicode filenames.
1261
Mac OSX does, Windows/Linux do not.
1263
return _platform_normalizes_filenames
1266
def _accessible_normalized_filename(path):
1267
"""Get the unicode normalized path, and if you can access the file.
1269
On platforms where the system normalizes filenames (Mac OSX),
1270
you can access a file by any path which will normalize correctly.
1271
On platforms where the system does not normalize filenames
1272
(Windows, Linux), you have to access a file by its exact path.
1274
Internally, bzr only supports NFC normalization, since that is
1275
the standard for XML documents.
1277
So return the normalized path, and a flag indicating if the file
1278
can be accessed by that path.
1281
return unicodedata.normalize('NFC', unicode(path)), True
1284
def _inaccessible_normalized_filename(path):
1285
__doc__ = _accessible_normalized_filename.__doc__
1287
normalized = unicodedata.normalize('NFC', unicode(path))
1288
return normalized, normalized == path
1291
if _platform_normalizes_filenames:
1292
normalized_filename = _accessible_normalized_filename
1294
normalized_filename = _inaccessible_normalized_filename
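# Hedged illustration (not part of the original module): both implementations
# above return the NFC form; they differ only in whether that form is
# guaranteed to be usable for file access on the current platform.
def _example_normalized_filename():
    import unicodedata
    decomposed = u'cafe' + u'\u0301'                      # NFD: 'e' + combining acute
    composed = unicodedata.normalize('NFC', decomposed)   # u'caf\xe9'
    path, accessible = normalized_filename(decomposed)
    # path == composed everywhere; accessible depends on the platform.
    return path == composed, accessible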
1297
def terminal_width():
1298
"""Return estimated terminal width."""
1299
isatty = getattr(sys.stdout, 'isatty', None)
1300
if isatty is None or not isatty():
1301
# If it's not a tty, the width makes no sense. We just use a value big
# enough to avoid truncations. When the output is redirected, the
# pagers can then handle that themselves. A cleaner implementation
# would be to fix the callers to not try to format at all in these
1308
if sys.platform == 'win32':
1309
return win32utils.get_console_size()[0]
1312
import struct, fcntl, termios
1313
s = struct.pack('HHHH', 0, 0, 0, 0)
1314
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
1315
width = struct.unpack('HHHH', x)[1]
1320
width = int(os.environ['COLUMNS'])
1329
def supports_executable():
1330
return sys.platform != "win32"
1333
def supports_posix_readonly():
1334
"""Return True if 'readonly' has POSIX semantics, False otherwise.
1336
Notably, a win32 readonly file cannot be deleted, unlike POSIX where the
1337
directory controls creation/deletion, etc.
1339
And under win32, readonly means that the directory itself cannot be
1340
deleted. The contents of a readonly directory can be changed, unlike POSIX
1341
where files in readonly directories cannot be added, deleted or renamed.
1343
return sys.platform != "win32"
1346
def set_or_unset_env(env_variable, value):
1347
"""Modify the environment, setting or removing the env_variable.
1349
:param env_variable: The environment variable in question
1350
:param value: The value to set the environment to. If None, then
1351
the variable will be removed.
1352
:return: The original value of the environment variable.
1354
orig_val = os.environ.get(env_variable)
1356
if orig_val is not None:
1357
del os.environ[env_variable]
1359
if isinstance(value, unicode):
1360
value = value.encode(get_user_encoding())
1361
os.environ[env_variable] = value
1365
_validWin32PathRE = re.compile(r'^([A-Za-z]:[/\\])?[^:<>*"?\|]*$')
1368
def check_legal_path(path):
1369
"""Check whether the supplied path is legal.
1370
This is only required on Windows, so we don't test on other platforms
1373
if sys.platform != "win32":
1375
if _validWin32PathRE.match(path) is None:
1376
raise errors.IllegalPath(path)
1379
_WIN32_ERROR_DIRECTORY = 267 # Similar to errno.ENOTDIR
1381
def _is_error_enotdir(e):
1382
"""Check if this exception represents ENOTDIR.
1384
Unfortunately, python is very inconsistent about the exception
1385
here. The cases are:
1386
1) Linux, Mac OSX all versions seem to set errno == ENOTDIR
1387
2) Windows, Python2.4, uses errno == ERROR_DIRECTORY (267)
1388
which is the windows error code.
1389
3) Windows, Python2.5 uses errno == EINVAL and
1390
winerror == ERROR_DIRECTORY
1392
:param e: An Exception object (expected to be OSError with an errno
1393
attribute, but we should be able to cope with anything)
1394
:return: True if this represents an ENOTDIR error. False otherwise.
1396
en = getattr(e, 'errno', None)
1397
if (en == errno.ENOTDIR
1398
or (sys.platform == 'win32'
1399
and (en == _WIN32_ERROR_DIRECTORY
1400
or (en == errno.EINVAL
1401
and getattr(e, 'winerror', None) == _WIN32_ERROR_DIRECTORY)
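# Hedged illustration (not part of the original module): a synthetic OSError
# with errno set to ENOTDIR is recognised on every platform, which is the
# common case the helper above normalises.
def _example_is_error_enotdir():
    e = OSError(errno.ENOTDIR, 'Not a directory')
    return _is_error_enotdir(e)   # expected: True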
1407
def walkdirs(top, prefix=""):
1408
"""Yield data about all the directories in a tree.
1410
This yields all the data about the contents of a directory at a time.
1411
After each directory has been yielded, if the caller has mutated the list
1412
to exclude some directories, they are then not descended into.
1414
The data yielded is of the form:
1415
((directory-relpath, directory-path-from-top),
1416
[(relpath, basename, kind, lstat, path-from-top), ...]),
1417
- directory-relpath is the relative path of the directory being returned
1418
with respect to top. prefix is prepended to this.
1419
- directory-path-from-top is the path including top for this directory.
1420
It is suitable for use with os functions.
1421
- relpath is the relative path within the subtree being walked.
1422
- basename is the basename of the path
1423
- kind is the kind of the file now. If unknown then the file is not
1424
present within the tree - but it may be recorded as versioned. See
1426
- lstat is the stat data *if* the file was statted.
1427
- planned, not implemented:
1428
path_from_tree_root is the path from the root of the tree.
1430
:param prefix: Prefix the relpaths that are yielded with 'prefix'. This
1431
allows one to walk a subtree but get paths that are relative to a tree
1433
:return: an iterator over the dirs.
1435
#TODO there is a bit of a smell where the results of the directory-
1436
# summary in this, and the path from the root, may not agree
1437
# depending on top and prefix - i.e. ./foo and foo as a pair leads to
1438
# potentially confusing output. We should make this more robust - but
1439
# not at a speed cost. RBC 20060731
1441
_directory = _directory_kind
1442
_listdir = os.listdir
1443
_kind_from_mode = file_kind_from_stat_mode
1444
pending = [(safe_unicode(prefix), "", _directory, None, safe_unicode(top))]
1446
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
1447
relroot, _, _, _, top = pending.pop()
1449
relprefix = relroot + u'/'
1452
top_slash = top + u'/'
1455
append = dirblock.append
1457
names = sorted(_listdir(top))
1459
if not _is_error_enotdir(e):
1463
abspath = top_slash + name
1464
statvalue = _lstat(abspath)
1465
kind = _kind_from_mode(statvalue.st_mode)
1466
append((relprefix + name, name, kind, statvalue, abspath))
1467
yield (relroot, top), dirblock
1469
# push the user specified dirs from dirblock
1470
pending.extend(d for d in reversed(dirblock) if d[2] == _directory)
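# Hedged usage sketch (not part of the original module): because walkdirs
# yields each dirblock before descending, callers can prune the walk by
# mutating that list in place, as the docstring above describes. The
# directory name used here is just an example value.
def _example_walkdirs(top='example-tree'):
    seen = []
    for (dir_relpath, dir_path), dirblock in walkdirs(top):
        seen.extend(entry[0] for entry in dirblock)
        # Drop '.bzr'-style control directories so they are not descended into.
        dirblock[:] = [entry for entry in dirblock if entry[1] != '.bzr']
    return seen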
1473
class DirReader(object):
1474
"""An interface for reading directories."""
1476
def top_prefix_to_starting_dir(self, top, prefix=""):
1477
"""Converts top and prefix to a starting dir entry
1479
:param top: A utf8 path
1480
:param prefix: An optional utf8 path to prefix output relative paths
1482
:return: A tuple starting with prefix, and ending with the native
1485
raise NotImplementedError(self.top_prefix_to_starting_dir)
1487
def read_dir(self, prefix, top):
1488
"""Read a specific dir.
1490
:param prefix: A utf8 prefix to be prepended to the path basenames.
1491
:param top: A natively encoded path to read.
1492
:return: A list of the directories contents. Each item contains:
1493
(utf8_relpath, utf8_name, kind, lstatvalue, native_abspath)
1495
raise NotImplementedError(self.read_dir)
1498
_selected_dir_reader = None
1501
def _walkdirs_utf8(top, prefix=""):
1502
"""Yield data about all the directories in a tree.
1504
This yields the same information as walkdirs() only each entry is yielded
1505
in utf-8. On platforms which have a filesystem encoding of utf8 the paths
1506
are returned as exact byte-strings.
1508
:return: yields a tuple of (dir_info, [file_info])
1509
dir_info is (utf8_relpath, path-from-top)
1510
file_info is (utf8_relpath, utf8_name, kind, lstat, path-from-top)
1511
if top is an absolute path, path-from-top is also an absolute path.
1512
path-from-top might be unicode or utf8, but it is the correct path to
1513
pass to os functions to affect the file in question. (such as os.lstat)
1515
global _selected_dir_reader
1516
if _selected_dir_reader is None:
1517
fs_encoding = _fs_enc.upper()
1518
if sys.platform == "win32" and win32utils.winver == 'Windows NT':
1519
# Win98 doesn't have unicode apis like FindFirstFileW
1520
# TODO: We possibly could support Win98 by falling back to the
1521
# original FindFirstFile, and using TCHAR instead of WCHAR,
1522
# but that gets a bit tricky, and requires custom compiling
1525
from bzrlib._walkdirs_win32 import Win32ReadDir
1526
_selected_dir_reader = Win32ReadDir()
1529
elif fs_encoding in ('UTF-8', 'US-ASCII', 'ANSI_X3.4-1968'):
1530
# ANSI_X3.4-1968 is a form of ASCII
1532
from bzrlib._readdir_pyx import UTF8DirReader
1533
_selected_dir_reader = UTF8DirReader()
1534
except ImportError, e:
1535
failed_to_load_extension(e)
1538
if _selected_dir_reader is None:
1539
# Fallback to the python version
1540
_selected_dir_reader = UnicodeDirReader()
1542
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
1543
# But we don't actually use 1-3 in pending, so set them to None
1544
pending = [[_selected_dir_reader.top_prefix_to_starting_dir(top, prefix)]]
1545
read_dir = _selected_dir_reader.read_dir
1546
_directory = _directory_kind
1548
relroot, _, _, _, top = pending[-1].pop()
1551
dirblock = sorted(read_dir(relroot, top))
1552
yield (relroot, top), dirblock
1553
# push the user specified dirs from dirblock
1554
next = [d for d in reversed(dirblock) if d[2] == _directory]
1556
pending.append(next)
1559
class UnicodeDirReader(DirReader):
1560
"""A dir reader for non-utf8 file systems, which transcodes."""
1562
__slots__ = ['_utf8_encode']
1565
self._utf8_encode = codecs.getencoder('utf8')
1567
def top_prefix_to_starting_dir(self, top, prefix=""):
1568
"""See DirReader.top_prefix_to_starting_dir."""
1569
return (safe_utf8(prefix), None, None, None, safe_unicode(top))
1571
def read_dir(self, prefix, top):
1572
"""Read a single directory from a non-utf8 file system.
1574
top, and the abspath element in the output are unicode, all other paths
1575
are utf8. Local disk IO is done via unicode calls to listdir etc.
1577
This is currently the fallback code path when the filesystem encoding is
1578
not UTF-8. It may be better to implement an alternative so that we can
1579
safely handle paths that are not properly decodable in the current
1582
See DirReader.read_dir for details.
1584
_utf8_encode = self._utf8_encode
1586
_listdir = os.listdir
1587
_kind_from_mode = file_kind_from_stat_mode
1590
relprefix = prefix + '/'
1593
top_slash = top + u'/'
1596
append = dirblock.append
1597
for name in sorted(_listdir(top)):
1599
name_utf8 = _utf8_encode(name)[0]
1600
except UnicodeDecodeError:
1601
raise errors.BadFilenameEncoding(
1602
_utf8_encode(relprefix)[0] + name, _fs_enc)
1603
abspath = top_slash + name
1604
statvalue = _lstat(abspath)
1605
kind = _kind_from_mode(statvalue.st_mode)
1606
append((relprefix + name_utf8, name_utf8, kind, statvalue, abspath))
1610
def copy_tree(from_path, to_path, handlers={}):
1611
"""Copy all of the entries in from_path into to_path.
1613
:param from_path: The base directory to copy.
1614
:param to_path: The target directory. If it does not exist, it will
1616
:param handlers: A dictionary of functions, each of which takes a source and
1617
destinations for files, directories, etc.
1618
It is keyed on the file kind, such as 'directory', 'symlink', or 'file'
1619
'file', 'directory', and 'symlink' should always exist.
1620
If they are missing, they will be replaced with 'os.mkdir()',
1621
'os.readlink() + os.symlink()', and 'shutil.copy2()', respectively.
1623
# Now, just copy the existing cached tree to the new location
1624
# We use a cheap trick here.
1625
# Absolute paths are prefixed with the first parameter
1626
# relative paths are prefixed with the second.
1627
# So we can get both the source and target returned
1628
# without any extra work.
1630
def copy_dir(source, dest):
1633
def copy_link(source, dest):
1634
"""Copy the contents of a symlink"""
1635
link_to = os.readlink(source)
1636
os.symlink(link_to, dest)
1638
real_handlers = {'file':shutil.copy2,
1639
'symlink':copy_link,
1640
'directory':copy_dir,
1642
real_handlers.update(handlers)
1644
if not os.path.exists(to_path):
1645
real_handlers['directory'](from_path, to_path)
1647
for dir_info, entries in walkdirs(from_path, prefix=to_path):
1648
for relpath, name, kind, st, abspath in entries:
1649
real_handlers[kind](abspath, relpath)
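# Hedged usage sketch (not part of the original module): the handlers mapping
# lets callers override how individual kinds are copied; everything else
# falls back to the defaults listed in the docstring. The paths below are
# example values only.
def _example_copy_tree(from_path='src-tree', to_path='dst-tree'):
    copied = []
    def copy_file_logged(source, dest):
        # Record each destination, then defer to the default file handler.
        copied.append(dest)
        shutil.copy2(source, dest)
    copy_tree(from_path, to_path, handlers={'file': copy_file_logged})
    return copied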
1652
def path_prefix_key(path):
1653
"""Generate a prefix-order path key for path.
1655
This can be used to sort paths in the same way that walkdirs does.
1657
return (dirname(path) , path)
1660
def compare_paths_prefix_order(path_a, path_b):
1661
"""Compare path_a and path_b to generate the same order walkdirs uses."""
1662
key_a = path_prefix_key(path_a)
1663
key_b = path_prefix_key(path_b)
1664
return cmp(key_a, key_b)
1667
_cached_user_encoding = None
1670
def get_user_encoding(use_cache=True):
1671
"""Find out what the preferred user encoding is.
1673
This is generally the encoding that is used for command line parameters
1674
and file contents. This may be different from the terminal encoding
1675
or the filesystem encoding.
1677
:param use_cache: Enable cache for detected encoding.
1678
(This parameter is turned on by default,
1679
and required only for selftesting)
1681
:return: A string defining the preferred user encoding
1683
global _cached_user_encoding
1684
if _cached_user_encoding is not None and use_cache:
1685
return _cached_user_encoding
1687
if sys.platform == 'darwin':
1688
# python locale.getpreferredencoding() always returns
1689
# 'mac-roman' on darwin. That's a lie.
1690
sys.platform = 'posix'
1692
if os.environ.get('LANG', None) is None:
1693
# If LANG is not set, we end up with 'ascii', which is bad
1694
# ('mac-roman' is more than ascii), so we set a default which
1695
# will give us UTF-8 (which appears to work in all cases on
1696
# OSX). Users are still free to override LANG of course, as
1697
# long as it gives us something meaningful. This work-around
1698
# *may* not be needed with python 3k and/or OSX 10.5, but will
1699
# work with them too -- vila 20080908
1700
os.environ['LANG'] = 'en_US.UTF-8'
1703
sys.platform = 'darwin'
1708
user_encoding = locale.getpreferredencoding()
1709
except locale.Error, e:
1710
sys.stderr.write('bzr: warning: %s\n'
1711
' Could not determine what text encoding to use.\n'
1712
' This error usually means your Python interpreter\n'
1713
' doesn\'t support the locale set by $LANG (%s)\n'
1714
" Continuing with ascii encoding.\n"
1715
% (e, os.environ.get('LANG')))
1716
user_encoding = 'ascii'
1718
# Windows returns 'cp0' to indicate there is no code page. So we'll just
1719
# treat that as ASCII, and not support printing unicode characters to the
1722
# For python scripts run under vim, we get '', so also treat that as ASCII
1723
if user_encoding in (None, 'cp0', ''):
1724
user_encoding = 'ascii'
1728
codecs.lookup(user_encoding)
1730
sys.stderr.write('bzr: warning:'
1731
' unknown encoding %s.'
1732
' Continuing with ascii encoding.\n'
1735
user_encoding = 'ascii'
1738
_cached_user_encoding = user_encoding
1740
return user_encoding
1743
def get_host_name():
1744
"""Return the current unicode host name.
1746
This is meant to be used in place of socket.gethostname() because that
1747
behaves inconsistently on different platforms.
1749
if sys.platform == "win32":
1751
return win32utils.get_host_name()
1754
return socket.gethostname().decode(get_user_encoding())
1757
def recv_all(socket, bytes):
1758
"""Receive an exact number of bytes.
1760
Regular Socket.recv() may return less than the requested number of bytes,
1761
depending on what's in the OS buffer. MSG_WAITALL is not available
1762
on all platforms, but this should work everywhere. This will return
1763
less than the requested amount if the remote end closes.
1765
This isn't optimized and is intended mostly for use in testing.
1768
while len(b) < bytes:
1769
new = until_no_eintr(socket.recv, bytes - len(b))
1776
def send_all(socket, bytes, report_activity=None):
1777
"""Send all bytes on a socket.
1779
Regular socket.sendall() can give socket error 10053 on Windows. This
1780
implementation sends no more than 64k at a time, which avoids this problem.
1782
:param report_activity: Call this as bytes are read, see
1783
Transport._report_activity
1786
for pos in xrange(0, len(bytes), chunk_size):
1787
block = bytes[pos:pos+chunk_size]
1788
if report_activity is not None:
1789
report_activity(len(block), 'write')
1790
until_no_eintr(socket.sendall, block)
1793
def dereference_path(path):
1794
"""Determine the real path to a file.
1796
All parent elements are dereferenced. But the file itself is not
1798
:param path: The original path. May be absolute or relative.
1799
:return: the real path *to* the file
1801
parent, base = os.path.split(path)
1802
# The pathjoin for '.' is a workaround for Python bug #1213894.
1803
# (initial path components aren't dereferenced)
1804
return pathjoin(realpath(pathjoin('.', parent)), base)
1807
def supports_mapi():
1808
"""Return True if we can use MAPI to launch a mail client."""
1809
return sys.platform == "win32"
1812
def resource_string(package, resource_name):
1813
"""Load a resource from a package and return it as a string.
1815
Note: Only packages that start with bzrlib are currently supported.
1817
This is designed to be a lightweight implementation of resource
1818
loading in a way which is API compatible with the same API from
1820
http://peak.telecommunity.com/DevCenter/PkgResources#basic-resource-access.
1821
If and when pkg_resources becomes a standard library, this routine
1824
# Check package name is within bzrlib
1825
if package == "bzrlib":
1826
resource_relpath = resource_name
1827
elif package.startswith("bzrlib."):
1828
package = package[len("bzrlib."):].replace('.', os.sep)
1829
resource_relpath = pathjoin(package, resource_name)
1831
raise errors.BzrError('resource package %s not in bzrlib' % package)
1833
# Map the resource to a file and read its contents
1834
base = dirname(bzrlib.__file__)
1835
if getattr(sys, 'frozen', None): # bzr.exe
1836
base = abspath(pathjoin(base, '..', '..'))
1837
filename = pathjoin(base, resource_relpath)
1838
return open(filename, 'rU').read()
1841
def file_kind_from_stat_mode_thunk(mode):
1842
global file_kind_from_stat_mode
1843
if file_kind_from_stat_mode is file_kind_from_stat_mode_thunk:
1845
from bzrlib._readdir_pyx import UTF8DirReader
1846
file_kind_from_stat_mode = UTF8DirReader().kind_from_mode
1847
except ImportError, e:
1848
# This is one time where we won't warn that an extension failed to
1849
# load. The extension is never available on Windows anyway.
1850
from bzrlib._readdir_py import (
1851
_kind_from_mode as file_kind_from_stat_mode
1853
return file_kind_from_stat_mode(mode)
1854
file_kind_from_stat_mode = file_kind_from_stat_mode_thunk
1857
def file_kind(f, _lstat=os.lstat):
1859
return file_kind_from_stat_mode(_lstat(f).st_mode)
1861
if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
1862
raise errors.NoSuchFile(f)
1866
def until_no_eintr(f, *a, **kw):
1867
"""Run f(*a, **kw), retrying if an EINTR error occurs."""
1868
# Borrowed from Twisted's twisted.python.util.untilConcludes function.
1872
except (IOError, OSError), e:
1873
if e.errno == errno.EINTR:
1877
def re_compile_checked(re_string, flags=0, where=""):
1878
"""Return a compiled re, or raise a sensible error.
1880
This should only be used when compiling user-supplied REs.
1882
:param re_string: Text form of regular expression.
1883
:param flags: eg re.IGNORECASE
1884
:param where: Message explaining to the user the context where
1885
it occurred, eg 'log search filter'.
1887
# from https://bugs.launchpad.net/bzr/+bug/251352
1889
re_obj = re.compile(re_string, flags)
1894
where = ' in ' + where
1895
# despite the name, 'error' is a type
1896
raise errors.BzrCommandError('Invalid regular expression%s: %r: %s'
1897
% (where, re_string, e))
1900
if sys.platform == "win32":
1903
return msvcrt.getch()
1908
fd = sys.stdin.fileno()
1909
settings = termios.tcgetattr(fd)
1912
ch = sys.stdin.read(1)
1914
termios.tcsetattr(fd, termios.TCSADRAIN, settings)
1918
if sys.platform == 'linux2':
1919
def _local_concurrency():
1921
prefix = 'processor'
1922
for line in file('/proc/cpuinfo', 'rb'):
1923
if line.startswith(prefix):
1924
concurrency = int(line[line.find(':')+1:]) + 1
1926
elif sys.platform == 'darwin':
1927
def _local_concurrency():
1928
return subprocess.Popen(['sysctl', '-n', 'hw.availcpu'],
1929
stdout=subprocess.PIPE).communicate()[0]
1930
elif sys.platform[0:7] == 'freebsd':
1931
def _local_concurrency():
1932
return subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
1933
stdout=subprocess.PIPE).communicate()[0]
1934
elif sys.platform == 'sunos5':
1935
def _local_concurrency():
1936
return subprocess.Popen(['psrinfo', '-p',],
1937
stdout=subprocess.PIPE).communicate()[0]
1938
elif sys.platform == "win32":
1939
def _local_concurrency():
1940
# This appears to return the number of cores.
1941
return os.environ.get('NUMBER_OF_PROCESSORS')
1943
def _local_concurrency():
1948
_cached_local_concurrency = None
1950
def local_concurrency(use_cache=True):
1951
"""Return how many processes can be run concurrently.
1953
Rely on platform specific implementations and default to 1 (one) if
1954
anything goes wrong.
1956
global _cached_local_concurrency
1957
if _cached_local_concurrency is not None and use_cache:
1958
return _cached_local_concurrency
1961
concurrency = _local_concurrency()
1962
except (OSError, IOError):
1965
concurrency = int(concurrency)
1966
except (TypeError, ValueError):
1969
_cached_local_concurrency = concurrency
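# Hedged usage sketch (not part of the original module): callers simply ask
# for the (cached) value; the platform-specific probes above and the int()
# coercion mean the worst case is a fallback of 1.
def _example_local_concurrency():
    return local_concurrency(use_cache=True)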