# Copyright (C) 2005, 2006, 2007, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
from stat import (S_ISREG, S_ISDIR, S_ISLNK, ST_MODE, ST_SIZE,
21
S_ISCHR, S_ISBLK, S_ISFIFO, S_ISSOCK)
26
from bzrlib.lazy_import import lazy_import
27
lazy_import(globals(), """
29
from datetime import datetime
31
from ntpath import (abspath as _nt_abspath,
33
normpath as _nt_normpath,
34
realpath as _nt_realpath,
35
splitdrive as _nt_splitdrive,
45
from tempfile import (
57
# sha and md5 modules are deprecated in python2.6 but hashlib is available as
59
if sys.version_info < (2, 5):
60
import md5 as _mod_md5
62
import sha as _mod_sha
72
from bzrlib import symbol_versioning
75
# Cross platform wall-clock time functionality with decent resolution.
76
# On Linux ``time.clock`` returns only CPU time. On Windows, ``time.time()``
77
# only has a resolution of ~15ms. Note that ``time.clock()`` is not
78
# synchronized with ``time.time()``, this is only meant to be used to find
79
# delta times by subtracting from another call to this function.
80
timer_func = time.time
81
if sys.platform == 'win32':
82
timer_func = time.clock
84
# On win32, O_BINARY is used to indicate the file should
85
# be opened in binary mode, rather than text mode.
86
# On other platforms, O_BINARY doesn't exist, because
87
# they always open in binary mode, so it is okay to
88
# OR with 0 on those platforms
89
O_BINARY = getattr(os, 'O_BINARY', 0)
92
def get_unicode_argv():
94
user_encoding = get_user_encoding()
95
return [a.decode(user_encoding) for a in sys.argv[1:]]
96
except UnicodeDecodeError:
97
raise errors.BzrError(("Parameter '%r' is unsupported by the current "
101
def make_readonly(filename):
102
"""Make a filename read-only."""
103
mod = os.lstat(filename).st_mode
104
if not stat.S_ISLNK(mod):
106
os.chmod(filename, mod)
109
def make_writable(filename):
110
mod = os.lstat(filename).st_mode
111
if not stat.S_ISLNK(mod):
113
os.chmod(filename, mod)
def minimum_path_selection(paths):
    """Return the smallest subset of paths which covers all supplied paths.

    :param paths: A container (and hence not None) of paths.
    :return: A set of paths sufficient to include everything in paths via
        is_inside, drawn from the paths parameter.
return path.split('/')
128
sorted_paths = sorted(list(paths), key=sort_key)
130
search_paths = [sorted_paths[0]]
131
for path in sorted_paths[1:]:
132
if not is_inside(search_paths[-1], path):
133
# This path is unique, add it
134
search_paths.append(path)
136
return set(search_paths)
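# Example (editorial sketch, not part of the original module): any supplied
# path that is already inside another supplied path is dropped, so one would
# expect something like:
#
#   >>> sorted(minimum_path_selection(['a', 'a/b', 'c']))
#   ['a', 'c']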
143
"""Return a quoted filename filename
145
This previously used backslash quoting, but that works poorly on
147
# TODO: I'm not really sure this is the best format either.x
149
if _QUOTE_RE is None:
150
_QUOTE_RE = re.compile(r'([^a-zA-Z0-9.,:/\\_~-])')
152
if _QUOTE_RE.search(f):
158
_directory_kind = 'directory'
161
"""Return the current umask"""
162
# Assume that people aren't messing with the umask while running
163
# XXX: This is not thread safe, but there is no way to get the
164
# umask without setting it
172
_directory_kind: "/",
174
'tree-reference': '+',
178
def kind_marker(kind):
180
return _kind_marker_map[kind]
182
raise errors.BzrError('invalid file kind %r' % kind)
185
lexists = getattr(os.path, 'lexists', None)
189
stat = getattr(os, 'lstat', os.stat)
193
if e.errno == errno.ENOENT:
196
raise errors.BzrError("lstat/stat of (%r): %r" % (f, e))
199
def fancy_rename(old, new, rename_func, unlink_func):
200
"""A fancy rename, when you don't have atomic rename.
202
:param old: The old path, to rename from
203
:param new: The new path, to rename to
204
:param rename_func: The potentially non-atomic rename function
205
:param unlink_func: A way to delete the target file if the full rename
208
new = safe_unicode(new)
209
# sftp rename doesn't allow overwriting, so play tricks:
210
base = os.path.basename(new)
211
dirname = os.path.dirname(new)
212
tmp_name = u'tmp.%s.%.9f.%d.%s' % (base, time.time(),
213
os.getpid(), rand_chars(10))
214
tmp_name = pathjoin(dirname, tmp_name)
216
# Rename the file out of the way, but keep track if it didn't exist
217
# We don't want to grab just any exception
218
# something like EACCES should prevent us from continuing
219
# The downside is that the rename_func has to throw an exception
220
# with an errno = ENOENT, or NoSuchFile
223
rename_func(new, tmp_name)
224
except (errors.NoSuchFile,), e:
227
# RBC 20060103 abstraction leakage: the paramiko SFTP clients rename
228
# function raises an IOError with errno is None when a rename fails.
229
# This then gets caught here.
230
if e.errno not in (None, errno.ENOENT, errno.ENOTDIR):
233
if (getattr(e, 'errno', None) is None
234
or e.errno not in (errno.ENOENT, errno.ENOTDIR)):
243
# This may throw an exception, in which case success will
245
rename_func(old, new)
247
except (IOError, OSError), e:
248
        # source and target may be aliases of each other (e.g. on a
        # case-insensitive filesystem), so we may have accidentally renamed
        # source when we tried to rename target
251
failure_exc = sys.exc_info()
252
if (file_existed and e.errno in (None, errno.ENOENT)
253
and old.lower() == new.lower()):
254
# source and target are the same file on a case-insensitive
255
# filesystem, so we don't generate an exception
259
# If the file used to exist, rename it back into place
260
# otherwise just delete it from the tmp location
262
unlink_func(tmp_name)
264
rename_func(tmp_name, new)
265
if failure_exc is not None:
266
raise failure_exc[0], failure_exc[1], failure_exc[2]
269
# In Python 2.4.2 and older, os.path.abspath and os.path.realpath
270
# choke on a Unicode string containing a relative path if
271
# os.getcwd() returns a non-sys.getdefaultencoding()-encoded
273
_fs_enc = sys.getfilesystemencoding() or 'utf-8'
274
def _posix_abspath(path):
275
# jam 20060426 rather than encoding to fsencoding
276
# copy posixpath.abspath, but use os.getcwdu instead
277
if not posixpath.isabs(path):
278
path = posixpath.join(getcwd(), path)
279
return posixpath.normpath(path)
282
def _posix_realpath(path):
283
return posixpath.realpath(path.encode(_fs_enc)).decode(_fs_enc)
286
def _win32_fixdrive(path):
287
"""Force drive letters to be consistent.
289
    win32 is inconsistent whether it returns lower or upper case,
    and even if it were consistent the user might type the other,
    so we force it to uppercase.
    Running python.exe under cmd.exe returns capital C:\\
    running win32 python inside a cygwin shell returns lowercase c:\\
295
drive, path = _nt_splitdrive(path)
296
return drive.upper() + path
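# Example (editorial sketch, not part of the original module): only the drive
# letter is upper-cased, the rest of the path is left untouched, so given the
# lazily imported ntpath splitdrive behaviour one would expect:
#
#   >>> _win32_fixdrive('c:/foo/Bar')
#   'C:/foo/Bar'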
299
def _win32_abspath(path):
300
# Real _nt_abspath doesn't have a problem with a unicode cwd
301
return _win32_fixdrive(_nt_abspath(unicode(path)).replace('\\', '/'))
304
def _win98_abspath(path):
305
"""Return the absolute version of a path.
306
Windows 98 safe implementation (python reimplementation
307
of Win32 API function GetFullPathNameW)
312
# \\HOST\path => //HOST/path
313
# //HOST/path => //HOST/path
314
# path => C:/cwd/path
317
# check for absolute path
318
drive = _nt_splitdrive(path)[0]
319
    if drive == '' and path[:2] not in ('//', '\\\\'):
        # we cannot simply os.path.join cwd and path
        # because os.path.join('C:', '/path') produces '/path'
        # and this is incorrect
324
if path[:1] in ('/','\\'):
325
cwd = _nt_splitdrive(cwd)[0]
327
path = cwd + '\\' + path
328
return _win32_fixdrive(_nt_normpath(path).replace('\\', '/'))
331
def _win32_realpath(path):
332
# Real _nt_realpath doesn't have a problem with a unicode cwd
333
return _win32_fixdrive(_nt_realpath(unicode(path)).replace('\\', '/'))
336
def _win32_pathjoin(*args):
337
return _nt_join(*args).replace('\\', '/')
340
def _win32_normpath(path):
341
return _win32_fixdrive(_nt_normpath(unicode(path)).replace('\\', '/'))
345
return _win32_fixdrive(os.getcwdu().replace('\\', '/'))
348
def _win32_mkdtemp(*args, **kwargs):
349
return _win32_fixdrive(tempfile.mkdtemp(*args, **kwargs).replace('\\', '/'))
352
def _win32_rename(old, new):
353
"""We expect to be able to atomically replace 'new' with old.
355
On win32, if new exists, it must be moved out of the way first,
359
fancy_rename(old, new, rename_func=os.rename, unlink_func=os.unlink)
361
if e.errno in (errno.EPERM, errno.EACCES, errno.EBUSY, errno.EINVAL):
362
            # If we try to rename a non-existent file onto cwd, we get
            # EPERM or EACCES instead of ENOENT; this will raise ENOENT
            # if the old path doesn't exist. Sometimes we get EACCES
365
# On Linux, we seem to get EBUSY, on Mac we get EINVAL
371
return unicodedata.normalize('NFC', os.getcwdu())
374
# Default is to just use the python builtins, but these can be rebound on
375
# particular platforms.
376
abspath = _posix_abspath
377
realpath = _posix_realpath
378
pathjoin = os.path.join
379
normpath = os.path.normpath
382
dirname = os.path.dirname
383
basename = os.path.basename
384
split = os.path.split
385
splitext = os.path.splitext
386
# These were already imported into local scope
387
# mkdtemp = tempfile.mkdtemp
388
# rmtree = shutil.rmtree
390
MIN_ABS_PATHLENGTH = 1
393
if sys.platform == 'win32':
394
if win32utils.winver == 'Windows 98':
395
abspath = _win98_abspath
397
abspath = _win32_abspath
398
realpath = _win32_realpath
399
pathjoin = _win32_pathjoin
400
normpath = _win32_normpath
401
getcwd = _win32_getcwd
402
mkdtemp = _win32_mkdtemp
403
rename = _win32_rename
405
MIN_ABS_PATHLENGTH = 3
407
def _win32_delete_readonly(function, path, excinfo):
408
"""Error handler for shutil.rmtree function [for win32]
409
Helps to remove files and dirs marked as read-only.
411
exception = excinfo[1]
412
if function in (os.remove, os.rmdir) \
413
and isinstance(exception, OSError) \
414
and exception.errno == errno.EACCES:
420
def rmtree(path, ignore_errors=False, onerror=_win32_delete_readonly):
421
"""Replacer for shutil.rmtree: could remove readonly dirs/files"""
422
return shutil.rmtree(path, ignore_errors, onerror)
424
f = win32utils.get_unicode_argv # special function or None
428
elif sys.platform == 'darwin':
432
def get_terminal_encoding():
433
"""Find the best encoding for printing to the screen.
435
This attempts to check both sys.stdout and sys.stdin to see
436
what encoding they are in, and if that fails it falls back to
437
osutils.get_user_encoding().
438
The problem is that on Windows, locale.getpreferredencoding()
439
is not the same encoding as that used by the console:
440
http://mail.python.org/pipermail/python-list/2003-May/162357.html
442
On my standard US Windows XP, the preferred encoding is
443
cp1252, but the console is cp437
445
from bzrlib.trace import mutter
446
output_encoding = getattr(sys.stdout, 'encoding', None)
447
if not output_encoding:
448
input_encoding = getattr(sys.stdin, 'encoding', None)
449
if not input_encoding:
450
output_encoding = get_user_encoding()
451
mutter('encoding stdout as osutils.get_user_encoding() %r',
454
output_encoding = input_encoding
455
mutter('encoding stdout as sys.stdin encoding %r', output_encoding)
457
mutter('encoding stdout as sys.stdout encoding %r', output_encoding)
458
if output_encoding == 'cp0':
459
# invalid encoding (cp0 means 'no codepage' on Windows)
460
output_encoding = get_user_encoding()
461
mutter('cp0 is invalid encoding.'
462
' encoding stdout as osutils.get_user_encoding() %r',
466
codecs.lookup(output_encoding)
468
sys.stderr.write('bzr: warning:'
469
' unknown terminal encoding %s.\n'
470
' Using encoding %s instead.\n'
471
% (output_encoding, get_user_encoding())
473
output_encoding = get_user_encoding()
475
return output_encoding
478
def normalizepath(f):
479
if getattr(os.path, 'realpath', None) is not None:
483
[p,e] = os.path.split(f)
484
if e == "" or e == "." or e == "..":
487
return pathjoin(F(p), e)
491
"""True if f is an accessible directory."""
493
return S_ISDIR(os.lstat(f)[ST_MODE])
499
"""True if f is a regular file."""
501
return S_ISREG(os.lstat(f)[ST_MODE])
506
"""True if f is a symlink."""
508
return S_ISLNK(os.lstat(f)[ST_MODE])
512
def is_inside(dir, fname):
513
"""True if fname is inside dir.
515
The parameters should typically be passed to osutils.normpath first, so
516
that . and .. and repeated slashes are eliminated, and the separators
517
are canonical for the platform.
519
The empty string as a dir name is taken as top-of-tree and matches
522
# XXX: Most callers of this can actually do something smarter by
523
# looking at the inventory
533
return fname.startswith(dir)
536
def is_inside_any(dir_list, fname):
537
"""True if fname is inside any of given dirs."""
538
for dirname in dir_list:
539
if is_inside(dirname, fname):
544
def is_inside_or_parent_of_any(dir_list, fname):
545
"""True if fname is a child or a parent of any of the given files."""
546
for dirname in dir_list:
547
if is_inside(dirname, fname) or is_inside(fname, dirname):
552
def pumpfile(from_file, to_file, read_length=-1, buff_size=32768,
553
report_activity=None, direction='read'):
554
"""Copy contents of one file to another.
556
The read_length can either be -1 to read to end-of-file (EOF) or
557
it can specify the maximum number of bytes to read.
559
The buff_size represents the maximum size for each read operation
560
performed on from_file.
562
:param report_activity: Call this as bytes are read, see
563
Transport._report_activity
564
:param direction: Will be passed to report_activity
566
:return: The number of bytes copied.
570
# read specified number of bytes
572
while read_length > 0:
573
num_bytes_to_read = min(read_length, buff_size)
575
block = from_file.read(num_bytes_to_read)
579
if report_activity is not None:
580
report_activity(len(block), direction)
583
actual_bytes_read = len(block)
584
read_length -= actual_bytes_read
585
length += actual_bytes_read
589
block = from_file.read(buff_size)
593
if report_activity is not None:
594
report_activity(len(block), direction)
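# Example (editorial sketch, not part of the original module; assumes Python 2
# and the standard-library StringIO): pumpfile copies in buff_size chunks and,
# per its docstring, returns the number of bytes copied:
#
#   >>> from StringIO import StringIO
#   >>> src, dst = StringIO('0123456789'), StringIO()
#   >>> pumpfile(src, dst, buff_size=4)
#   10
#   >>> dst.getvalue()
#   '0123456789'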
600
def pump_string_file(bytes, file_handle, segment_size=None):
601
"""Write bytes to file_handle in many smaller writes.
603
:param bytes: The string to write.
604
:param file_handle: The file to write to.
606
# Write data in chunks rather than all at once, because very large
607
# writes fail on some platforms (e.g. Windows with SMB mounted
610
segment_size = 5242880 # 5MB
611
segments = range(len(bytes) / segment_size + 1)
612
write = file_handle.write
613
for segment_index in segments:
614
segment = buffer(bytes, segment_index * segment_size, segment_size)
618
def file_iterator(input_file, readsize=32768):
620
b = input_file.read(readsize)
627
"""Calculate the hexdigest of an open file.
629
The file cursor should be already at the start.
641
def size_sha_file(f):
642
"""Calculate the size and hexdigest of an open file.
644
The file cursor should be already at the start and
645
the caller is responsible for closing the file afterwards.
656
return size, s.hexdigest()
659
def sha_file_by_name(fname):
660
"""Calculate the SHA1 of a file by reading the full text"""
662
f = os.open(fname, os.O_RDONLY | O_BINARY)
665
b = os.read(f, 1<<16)
673
def sha_strings(strings, _factory=sha):
674
"""Return the sha-1 of concatenation of strings"""
676
map(s.update, strings)
680
def sha_string(f, _factory=sha):
681
return _factory(f).hexdigest()
684
def fingerprint_file(f):
686
return {'size': len(b),
687
'sha1': sha(b).hexdigest()}
690
def compare_files(a, b):
691
"""Returns true if equal in contents"""
702
def local_time_offset(t=None):
703
"""Return offset of local zone from GMT, either at present or at time t."""
706
offset = datetime.fromtimestamp(t) - datetime.utcfromtimestamp(t)
707
return offset.days * 86400 + offset.seconds
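# Example (editorial sketch, not part of the original module; assumes a POSIX
# system where time.tzset() is available): with TZ forced to UTC the local
# offset from GMT is zero:
#
#   >>> import os, time
#   >>> os.environ['TZ'] = 'UTC'; time.tzset()
#   >>> local_time_offset(0)
#   0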
709
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
710
_default_format_by_weekday_num = [wd + " %Y-%m-%d %H:%M:%S" for wd in weekdays]
713
def format_date(t, offset=0, timezone='original', date_fmt=None,
715
"""Return a formatted date string.
717
:param t: Seconds since the epoch.
718
:param offset: Timezone offset in seconds east of utc.
719
:param timezone: How to display the time: 'utc', 'original' for the
720
timezone specified by offset, or 'local' for the process's current
722
:param date_fmt: strftime format.
723
:param show_offset: Whether to append the timezone.
725
(date_fmt, tt, offset_str) = \
726
_format_date(t, offset, timezone, date_fmt, show_offset)
727
date_fmt = date_fmt.replace('%a', weekdays[tt[6]])
728
date_str = time.strftime(date_fmt, tt)
729
return date_str + offset_str
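# Example (editorial sketch, not part of the original module): assuming the
# 'utc' branch uses a zero offset, as the offset_str formatting suggests, the
# epoch would format as:
#
#   >>> format_date(0, timezone='utc')
#   'Thu 1970-01-01 00:00:00 +0000'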
732
# Cache of formatted offset strings
736
def format_date_with_offset_in_original_timezone(t, offset=0,
737
_cache=_offset_cache):
738
"""Return a formatted date string in the original timezone.
740
    This routine may be faster than format_date.
742
:param t: Seconds since the epoch.
743
:param offset: Timezone offset in seconds east of utc.
747
tt = time.gmtime(t + offset)
748
date_fmt = _default_format_by_weekday_num[tt[6]]
749
date_str = time.strftime(date_fmt, tt)
750
offset_str = _cache.get(offset, None)
751
if offset_str is None:
752
offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
753
_cache[offset] = offset_str
754
return date_str + offset_str
757
def format_local_date(t, offset=0, timezone='original', date_fmt=None,
759
"""Return an unicode date string formatted according to the current locale.
761
:param t: Seconds since the epoch.
762
:param offset: Timezone offset in seconds east of utc.
763
:param timezone: How to display the time: 'utc', 'original' for the
764
timezone specified by offset, or 'local' for the process's current
766
:param date_fmt: strftime format.
767
:param show_offset: Whether to append the timezone.
769
(date_fmt, tt, offset_str) = \
770
_format_date(t, offset, timezone, date_fmt, show_offset)
771
date_str = time.strftime(date_fmt, tt)
772
if not isinstance(date_str, unicode):
773
date_str = date_str.decode(get_user_encoding(), 'replace')
774
return date_str + offset_str
777
def _format_date(t, offset, timezone, date_fmt, show_offset):
778
if timezone == 'utc':
781
elif timezone == 'original':
784
tt = time.gmtime(t + offset)
785
elif timezone == 'local':
786
tt = time.localtime(t)
787
offset = local_time_offset(t)
789
raise errors.UnsupportedTimezoneFormat(timezone)
791
date_fmt = "%a %Y-%m-%d %H:%M:%S"
793
offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
796
return (date_fmt, tt, offset_str)
799
def compact_date(when):
800
return time.strftime('%Y%m%d%H%M%S', time.gmtime(when))
803
def format_delta(delta):
804
"""Get a nice looking string for a time delta.
806
:param delta: The time difference in seconds, can be positive or negative.
807
positive indicates time in the past, negative indicates time in the
808
future. (usually time.time() - stored_time)
809
:return: String formatted to show approximate resolution
815
direction = 'in the future'
819
if seconds < 90: # print seconds up to 90 seconds
821
return '%d second %s' % (seconds, direction,)
823
return '%d seconds %s' % (seconds, direction)
825
minutes = int(seconds / 60)
826
seconds -= 60 * minutes
831
if minutes < 90: # print minutes, seconds up to 90 minutes
833
return '%d minute, %d second%s %s' % (
834
minutes, seconds, plural_seconds, direction)
836
return '%d minutes, %d second%s %s' % (
837
minutes, seconds, plural_seconds, direction)
839
hours = int(minutes / 60)
840
minutes -= 60 * hours
847
return '%d hour, %d minute%s %s' % (hours, minutes,
848
plural_minutes, direction)
849
return '%d hours, %d minute%s %s' % (hours, minutes,
850
plural_minutes, direction)
853
"""Return size of given open file."""
854
return os.fstat(f.fileno())[ST_SIZE]
857
# Define rand_bytes based on platform.
859
# Python 2.4 and later have os.urandom,
860
# but it doesn't work on some arches
862
rand_bytes = os.urandom
863
except (NotImplementedError, AttributeError):
864
# If python doesn't have os.urandom, or it doesn't work,
865
# then try to first pull random data from /dev/urandom
867
rand_bytes = file('/dev/urandom', 'rb').read
868
# Otherwise, use this hack as a last resort
869
except (IOError, OSError):
870
# not well seeded, but better than nothing
875
s += chr(random.randint(0, 255))
880
ALNUM = '0123456789abcdefghijklmnopqrstuvwxyz'
882
"""Return a random string of num alphanumeric characters
884
The result only contains lowercase chars because it may be used on
885
case-insensitive filesystems.
888
for raw_byte in rand_bytes(num):
889
s += ALNUM[ord(raw_byte) % 36]
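# Example (editorial sketch, not part of the original module): each random
# byte indexes into the 36-character ALNUM alphabet, e.g. ord('\xff') % 36 is
# 3, which maps to the character '3':
#
#   >>> ALNUM[ord('\xff') % 36]
#   '3'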
893
## TODO: We could later have path objects that remember their list
894
## decomposition (might be too tricksy though.)
897
"""Turn string into list of parts."""
898
# split on either delimiter because people might use either on
900
ps = re.split(r'[\\/]', p)
905
raise errors.BzrError("sorry, %r not allowed in path" % f)
906
elif (f == '.') or (f == ''):
915
if (f == '..') or (f is None) or (f == ''):
916
raise errors.BzrError("sorry, %r not allowed in path" % f)
920
def parent_directories(filename):
921
"""Return the list of parent directories, deepest first.
923
For example, parent_directories("a/b/c") -> ["a/b", "a"].
926
parts = splitpath(dirname(filename))
928
parents.append(joinpath(parts))
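# Example (editorial sketch, not part of the original module), matching the
# docstring above:
#
#   >>> parent_directories('a/b/c')
#   ['a/b', 'a']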
933
_extension_load_failures = []
936
def failed_to_load_extension(exception):
937
"""Handle failing to load a binary extension.
939
This should be called from the ImportError block guarding the attempt to
940
import the native extension. If this function returns, the pure-Python
941
implementation should be loaded instead::
944
>>> import bzrlib._fictional_extension_pyx
945
>>> except ImportError, e:
946
>>> bzrlib.osutils.failed_to_load_extension(e)
947
>>> import bzrlib._fictional_extension_py
949
# NB: This docstring is just an example, not a doctest, because doctest
950
# currently can't cope with the use of lazy imports in this namespace --
953
# This currently doesn't report the failure at the time it occurs, because
954
# they tend to happen very early in startup when we can't check config
955
# files etc, and also we want to report all failures but not spam the user
957
from bzrlib import trace
958
exception_str = str(exception)
959
if exception_str not in _extension_load_failures:
960
trace.mutter("failed to load compiled extension: %s" % exception_str)
961
_extension_load_failures.append(exception_str)
964
def report_extension_load_failures():
965
if not _extension_load_failures:
967
from bzrlib.config import GlobalConfig
968
if GlobalConfig().get_user_option_as_bool('ignore_missing_extensions'):
970
# the warnings framework should by default show this only once
971
from bzrlib.trace import warning
973
"bzr: warning: some compiled extensions could not be loaded; "
974
"see <https://answers.launchpad.net/bzr/+faq/703>")
975
# we no longer show the specific missing extensions here, because it makes
976
# the message too long and scary - see
977
# https://bugs.launchpad.net/bzr/+bug/430529
981
from bzrlib._chunks_to_lines_pyx import chunks_to_lines
982
except ImportError, e:
983
failed_to_load_extension(e)
984
from bzrlib._chunks_to_lines_py import chunks_to_lines
988
"""Split s into lines, but without removing the newline characters."""
989
# Trivially convert a fulltext into a 'chunked' representation, and let
990
# chunks_to_lines do the heavy lifting.
991
if isinstance(s, str):
992
# chunks_to_lines only supports 8-bit strings
993
return chunks_to_lines([s])
995
return _split_lines(s)
999
"""Split s into lines, but without removing the newline characters.
1001
This supports Unicode or plain string objects.
1003
lines = s.split('\n')
1004
result = [line + '\n' for line in lines[:-1]]
1006
result.append(lines[-1])
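# Example (editorial sketch, not part of the original module): the helper
# keeps the newline character on every line except the final, unterminated
# fragment:
#
#   >>> _split_lines(u'foo\nbar')
#   [u'foo\n', u'bar']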
1010
def hardlinks_good():
1011
return sys.platform not in ('win32', 'cygwin', 'darwin')
1014
def link_or_copy(src, dest):
1015
"""Hardlink a file, or copy it if it can't be hardlinked."""
1016
if not hardlinks_good():
1017
shutil.copyfile(src, dest)
1021
except (OSError, IOError), e:
1022
if e.errno != errno.EXDEV:
1024
shutil.copyfile(src, dest)
1027
def delete_any(path):
1028
"""Delete a file, symlink or directory.
1030
Will delete even if readonly.
1033
_delete_file_or_dir(path)
1034
except (OSError, IOError), e:
1035
if e.errno in (errno.EPERM, errno.EACCES):
1036
# make writable and try again
1039
except (OSError, IOError):
1041
_delete_file_or_dir(path)
1046
def _delete_file_or_dir(path):
1047
# Look Before You Leap (LBYL) is appropriate here instead of Easier to Ask for
1048
# Forgiveness than Permission (EAFP) because:
1049
# - root can damage a solaris file system by using unlink,
1050
# - unlink raises different exceptions on different OSes (linux: EISDIR, win32:
1051
# EACCES, OSX: EPERM) when invoked on a directory.
1052
if isdir(path): # Takes care of symlinks
1059
if getattr(os, 'symlink', None) is not None:
1065
def has_hardlinks():
1066
if getattr(os, 'link', None) is not None:
1072
def host_os_dereferences_symlinks():
1073
return (has_symlinks()
1074
and sys.platform not in ('cygwin', 'win32'))
1077
def readlink(abspath):
1078
"""Return a string representing the path to which the symbolic link points.
1080
:param abspath: The link absolute unicode path.
1082
    This is guaranteed to return the symbolic link in unicode in all python
1085
link = abspath.encode(_fs_enc)
1086
target = os.readlink(link)
1087
target = target.decode(_fs_enc)
1091
def contains_whitespace(s):
1092
"""True if there are any whitespace characters in s."""
1093
# string.whitespace can include '\xa0' in certain locales, because it is
1094
# considered "non-breaking-space" as part of ISO-8859-1. But it
1095
# 1) Isn't a breaking whitespace
1096
# 2) Isn't one of ' \t\r\n' which are characters we sometimes use as
1098
# 3) '\xa0' isn't unicode safe since it is >128.
1100
# This should *not* be a unicode set of characters in case the source
1101
# string is not a Unicode string. We can auto-up-cast the characters since
1102
# they are ascii, but we don't want to auto-up-cast the string in case it
1104
for ch in ' \t\n\r\v\f':
1111
def contains_linebreaks(s):
1112
"""True if there is any vertical whitespace in s."""
1120
def relpath(base, path):
1121
"""Return path relative to base, or raise exception.
1123
The path may be either an absolute path or a path relative to the
1124
current working directory.
1126
    os.path.commonprefix (python2.4) has a bad bug: it works just
    on string prefixes, assuming that '/u' is a prefix of '/u2'.  This
    avoids that problem.
1131
if len(base) < MIN_ABS_PATHLENGTH:
1132
# must have space for e.g. a drive letter
1133
raise ValueError('%r is too short to calculate a relative path'
1141
if len(head) <= len(base) and head != base:
1142
raise errors.PathNotChild(rp, base)
1145
head, tail = split(head)
1150
return pathjoin(*reversed(s))
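# Example (editorial sketch, not part of the original module; assumes
# POSIX-style absolute paths): relpath() strips the base, and raises
# PathNotChild when path is not underneath it:
#
#   >>> relpath('/base', '/base/dir/file')
#   'dir/file'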
1155
def _cicp_canonical_relpath(base, path):
1156
"""Return the canonical path relative to base.
1158
Like relpath, but on case-insensitive-case-preserving file-systems, this
1159
will return the relpath as stored on the file-system rather than in the
1160
case specified in the input string, for all existing portions of the path.
1162
This will cause O(N) behaviour if called for every path in a tree; if you
1163
have a number of paths to convert, you should use canonical_relpaths().
1165
# TODO: it should be possible to optimize this for Windows by using the
1166
# win32 API FindFiles function to look for the specified name - but using
1167
# os.listdir() still gives us the correct, platform agnostic semantics in
1170
rel = relpath(base, path)
1171
# '.' will have been turned into ''
1175
abs_base = abspath(base)
1177
_listdir = os.listdir
1179
# use an explicit iterator so we can easily consume the rest on early exit.
1180
bit_iter = iter(rel.split('/'))
1181
for bit in bit_iter:
1184
next_entries = _listdir(current)
1185
except OSError: # enoent, eperm, etc
1186
# We can't find this in the filesystem, so just append the
1188
current = pathjoin(current, bit, *list(bit_iter))
1190
for look in next_entries:
1191
if lbit == look.lower():
1192
current = pathjoin(current, look)
1195
# got to the end, nothing matched, so we just return the
1196
# non-existing bits as they were specified (the filename may be
1197
# the target of a move, for example).
1198
current = pathjoin(current, bit, *list(bit_iter))
1200
return current[len(abs_base):].lstrip('/')
1202
# XXX - TODO - we need better detection/integration of case-insensitive
1203
# file-systems; Linux often sees FAT32 devices (or NFS-mounted OSX
1204
# filesystems), for example, so could probably benefit from the same basic
1205
# support there. For now though, only Windows and OSX get that support, and
1206
# they get it for *all* file-systems!
1207
if sys.platform in ('win32', 'darwin'):
1208
canonical_relpath = _cicp_canonical_relpath
1210
canonical_relpath = relpath
1212
def canonical_relpaths(base, paths):
1213
"""Create an iterable to canonicalize a sequence of relative paths.
1215
The intent is for this implementation to use a cache, vastly speeding
1216
up multiple transformations in the same directory.
1218
# but for now, we haven't optimized...
1219
return [canonical_relpath(base, p) for p in paths]
1221
def safe_unicode(unicode_or_utf8_string):
1222
"""Coerce unicode_or_utf8_string into unicode.
1224
If it is unicode, it is returned.
1225
Otherwise it is decoded from utf-8. If decoding fails, the exception is
1226
wrapped in a BzrBadParameterNotUnicode exception.
1228
if isinstance(unicode_or_utf8_string, unicode):
1229
return unicode_or_utf8_string
1231
return unicode_or_utf8_string.decode('utf8')
1232
except UnicodeDecodeError:
1233
raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
1236
def safe_utf8(unicode_or_utf8_string):
1237
"""Coerce unicode_or_utf8_string to a utf8 string.
1239
If it is a str, it is returned.
1240
If it is Unicode, it is encoded into a utf-8 string.
1242
if isinstance(unicode_or_utf8_string, str):
1243
# TODO: jam 20070209 This is overkill, and probably has an impact on
1244
# performance if we are dealing with lots of apis that want a
1247
# Make sure it is a valid utf-8 string
1248
unicode_or_utf8_string.decode('utf-8')
1249
except UnicodeDecodeError:
1250
raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
1251
return unicode_or_utf8_string
1252
return unicode_or_utf8_string.encode('utf-8')
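# Example (editorial sketch, not part of the original module): the two helpers
# round-trip between unicode objects and UTF-8 byte strings:
#
#   >>> safe_unicode('caf\xc3\xa9') == u'caf\xe9'
#   True
#   >>> safe_utf8(u'caf\xe9') == 'caf\xc3\xa9'
#   True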
1255
_revision_id_warning = ('Unicode revision ids were deprecated in bzr 0.15.'
1256
' Revision id generators should be creating utf8'
1260
def safe_revision_id(unicode_or_utf8_string, warn=True):
1261
"""Revision ids should now be utf8, but at one point they were unicode.
1263
:param unicode_or_utf8_string: A possibly Unicode revision_id. (can also be
1265
:param warn: Functions that are sanitizing user data can set warn=False
1266
:return: None or a utf8 revision id.
1268
if (unicode_or_utf8_string is None
1269
or unicode_or_utf8_string.__class__ == str):
1270
return unicode_or_utf8_string
1272
symbol_versioning.warn(_revision_id_warning, DeprecationWarning,
1274
return cache_utf8.encode(unicode_or_utf8_string)
1277
_file_id_warning = ('Unicode file ids were deprecated in bzr 0.15. File id'
1278
' generators should be creating utf8 file ids.')
1281
def safe_file_id(unicode_or_utf8_string, warn=True):
1282
"""File ids should now be utf8, but at one point they were unicode.
1284
This is the same as safe_utf8, except it uses the cached encode functions
1285
to save a little bit of performance.
1287
:param unicode_or_utf8_string: A possibly Unicode file_id. (can also be
1289
:param warn: Functions that are sanitizing user data can set warn=False
1290
:return: None or a utf8 file id.
1292
if (unicode_or_utf8_string is None
1293
or unicode_or_utf8_string.__class__ == str):
1294
return unicode_or_utf8_string
1296
symbol_versioning.warn(_file_id_warning, DeprecationWarning,
1298
return cache_utf8.encode(unicode_or_utf8_string)
1301
_platform_normalizes_filenames = False
1302
if sys.platform == 'darwin':
1303
_platform_normalizes_filenames = True
1306
def normalizes_filenames():
1307
"""Return True if this platform normalizes unicode filenames.
1309
Mac OSX does, Windows/Linux do not.
1311
return _platform_normalizes_filenames
1314
def _accessible_normalized_filename(path):
1315
"""Get the unicode normalized path, and if you can access the file.
1317
On platforms where the system normalizes filenames (Mac OSX),
1318
you can access a file by any path which will normalize correctly.
1319
On platforms where the system does not normalize filenames
1320
(Windows, Linux), you have to access a file by its exact path.
1322
Internally, bzr only supports NFC normalization, since that is
1323
the standard for XML documents.
1325
So return the normalized path, and a flag indicating if the file
1326
can be accessed by that path.
1329
return unicodedata.normalize('NFC', unicode(path)), True
1332
def _inaccessible_normalized_filename(path):
1333
__doc__ = _accessible_normalized_filename.__doc__
1335
normalized = unicodedata.normalize('NFC', unicode(path))
1336
return normalized, normalized == path
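# Example (editorial sketch, not part of the original module): on a platform
# that does not normalize filenames (so normalized_filename is bound to the
# 'inaccessible' variant below), a decomposed name is reported as not directly
# accessible by its normalized form:
#
#   >>> _inaccessible_normalized_filename(u'a\u0301') == (u'\xe1', False)
#   True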
1339
if _platform_normalizes_filenames:
1340
normalized_filename = _accessible_normalized_filename
1342
normalized_filename = _inaccessible_normalized_filename
1345
default_terminal_width = 80
1346
"""The default terminal width for ttys.
1348
This is defined so that higher levels can share a common fallback value when
1349
terminal_width() returns None.
1353
def terminal_width():
1354
"""Return terminal width.
1356
    None is returned if the width can't be established precisely.
1359
- if BZR_COLUMNS is set, returns its value
1360
- if there is no controlling terminal, returns None
1361
- if COLUMNS is set, returns its value,
1363
From there, we need to query the OS to get the size of the controlling
1367
- get termios.TIOCGWINSZ
1368
- if an error occurs or a negative value is obtained, returns None
1372
- win32utils.get_console_size() decides,
1373
- returns None on error (provided default value)
1376
# If BZR_COLUMNS is set, take it, user is always right
1378
return int(os.environ['BZR_COLUMNS'])
1379
except (KeyError, ValueError):
1382
isatty = getattr(sys.stdout, 'isatty', None)
1383
if isatty is None or not isatty():
1384
# Don't guess, setting BZR_COLUMNS is the recommended way to override.
1387
    # If COLUMNS is set, take it, the terminal knows better (even inside a
    # given terminal, the application can decide to set COLUMNS to a lower
    # value (split screen) or a bigger value (scroll bars))
1391
return int(os.environ['COLUMNS'])
1392
except (KeyError, ValueError):
1395
width, height = _terminal_size(None, None)
1397
# Consider invalid values as meaning no width
1403
def _win32_terminal_size(width, height):
1404
width, height = win32utils.get_console_size(defaultx=width, defaulty=height)
1405
return width, height
1408
def _ioctl_terminal_size(width, height):
1410
import struct, fcntl, termios
1411
s = struct.pack('HHHH', 0, 0, 0, 0)
1412
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
1413
height, width = struct.unpack('HHHH', x)[0:2]
1414
except (IOError, AttributeError):
1416
return width, height
1418
_terminal_size = None
1419
"""Returns the terminal size as (width, height).
1421
:param width: Default value for width.
1422
:param height: Default value for height.
1424
This is defined specifically for each OS and queries the size of the
controlling terminal. If any error occurs, the provided default values should
be returned.
1427
if sys.platform == 'win32':
1428
_terminal_size = _win32_terminal_size
1430
_terminal_size = _ioctl_terminal_size
1433
def _terminal_size_changed(signum, frame):
1434
"""Set COLUMNS upon receiving a SIGnal for WINdow size CHange."""
1435
width, height = _terminal_size(None, None)
1436
if width is not None:
1437
os.environ['COLUMNS'] = str(width)
1439
if sys.platform == 'win32':
1440
# Martin (gz) mentioned WINDOW_BUFFER_SIZE_RECORD from ReadConsoleInput but
1441
# I've no idea how to plug that in the current design -- vila 20091216
1444
signal.signal(signal.SIGWINCH, _terminal_size_changed)
1447
def supports_executable():
1448
return sys.platform != "win32"
1451
def supports_posix_readonly():
1452
"""Return True if 'readonly' has POSIX semantics, False otherwise.
1454
Notably, a win32 readonly file cannot be deleted, unlike POSIX where the
1455
directory controls creation/deletion, etc.
1457
And under win32, readonly means that the directory itself cannot be
1458
deleted. The contents of a readonly directory can be changed, unlike POSIX
1459
where files in readonly directories cannot be added, deleted or renamed.
1461
return sys.platform != "win32"
1464
def set_or_unset_env(env_variable, value):
1465
"""Modify the environment, setting or removing the env_variable.
1467
:param env_variable: The environment variable in question
1468
:param value: The value to set the environment to. If None, then
1469
the variable will be removed.
1470
:return: The original value of the environment variable.
1472
orig_val = os.environ.get(env_variable)
1474
if orig_val is not None:
1475
del os.environ[env_variable]
1477
if isinstance(value, unicode):
1478
value = value.encode(get_user_encoding())
1479
os.environ[env_variable] = value
1483
_validWin32PathRE = re.compile(r'^([A-Za-z]:[/\\])?[^:<>*"?\|]*$')
1486
def check_legal_path(path):
1487
"""Check whether the supplied path is legal.
1488
This is only required on Windows, so we don't test on other platforms
1491
if sys.platform != "win32":
1493
if _validWin32PathRE.match(path) is None:
1494
raise errors.IllegalPath(path)
1497
_WIN32_ERROR_DIRECTORY = 267 # Similar to errno.ENOTDIR
1499
def _is_error_enotdir(e):
1500
"""Check if this exception represents ENOTDIR.
1502
Unfortunately, python is very inconsistent about the exception
1503
here. The cases are:
1504
1) Linux, Mac OSX all versions seem to set errno == ENOTDIR
1505
2) Windows, Python2.4, uses errno == ERROR_DIRECTORY (267)
1506
which is the windows error code.
1507
3) Windows, Python2.5 uses errno == EINVAL and
1508
winerror == ERROR_DIRECTORY
1510
:param e: An Exception object (expected to be OSError with an errno
1511
attribute, but we should be able to cope with anything)
1512
:return: True if this represents an ENOTDIR error. False otherwise.
1514
en = getattr(e, 'errno', None)
1515
if (en == errno.ENOTDIR
1516
or (sys.platform == 'win32'
1517
and (en == _WIN32_ERROR_DIRECTORY
1518
or (en == errno.EINVAL
1519
and getattr(e, 'winerror', None) == _WIN32_ERROR_DIRECTORY)
1525
def walkdirs(top, prefix=""):
1526
"""Yield data about all the directories in a tree.
1528
This yields all the data about the contents of a directory at a time.
1529
After each directory has been yielded, if the caller has mutated the list
1530
to exclude some directories, they are then not descended into.
1532
The data yielded is of the form:
1533
((directory-relpath, directory-path-from-top),
1534
[(relpath, basename, kind, lstat, path-from-top), ...]),
1535
- directory-relpath is the relative path of the directory being returned
1536
with respect to top. prefix is prepended to this.
1537
- directory-path-from-root is the path including top for this directory.
1538
It is suitable for use with os functions.
1539
- relpath is the relative path within the subtree being walked.
1540
- basename is the basename of the path
1541
- kind is the kind of the file now. If unknown then the file is not
1542
present within the tree - but it may be recorded as versioned. See
1544
- lstat is the stat data *if* the file was statted.
1545
- planned, not implemented:
1546
path_from_tree_root is the path from the root of the tree.
1548
:param prefix: Prefix the relpaths that are yielded with 'prefix'. This
1549
allows one to walk a subtree but get paths that are relative to a tree
1551
:return: an iterator over the dirs.
1553
    # TODO: there is a bit of a smell where the results of the directory-
1554
# summary in this, and the path from the root, may not agree
1555
# depending on top and prefix - i.e. ./foo and foo as a pair leads to
1556
# potentially confusing output. We should make this more robust - but
1557
# not at a speed cost. RBC 20060731
1559
_directory = _directory_kind
1560
_listdir = os.listdir
1561
_kind_from_mode = file_kind_from_stat_mode
1562
pending = [(safe_unicode(prefix), "", _directory, None, safe_unicode(top))]
1564
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
1565
relroot, _, _, _, top = pending.pop()
1567
relprefix = relroot + u'/'
1570
top_slash = top + u'/'
1573
append = dirblock.append
1575
names = sorted(_listdir(top))
1577
if not _is_error_enotdir(e):
1581
abspath = top_slash + name
1582
statvalue = _lstat(abspath)
1583
kind = _kind_from_mode(statvalue.st_mode)
1584
append((relprefix + name, name, kind, statvalue, abspath))
1585
yield (relroot, top), dirblock
1587
# push the user specified dirs from dirblock
1588
pending.extend(d for d in reversed(dirblock) if d[2] == _directory)
1591
class DirReader(object):
1592
"""An interface for reading directories."""
1594
def top_prefix_to_starting_dir(self, top, prefix=""):
1595
"""Converts top and prefix to a starting dir entry
1597
:param top: A utf8 path
1598
:param prefix: An optional utf8 path to prefix output relative paths
1600
:return: A tuple starting with prefix, and ending with the native
1603
raise NotImplementedError(self.top_prefix_to_starting_dir)
1605
def read_dir(self, prefix, top):
1606
"""Read a specific dir.
1608
        :param prefix: A utf8 prefix to be prepended to the path basenames.
1609
:param top: A natively encoded path to read.
1610
:return: A list of the directories contents. Each item contains:
1611
(utf8_relpath, utf8_name, kind, lstatvalue, native_abspath)
1613
raise NotImplementedError(self.read_dir)
1616
_selected_dir_reader = None
1619
def _walkdirs_utf8(top, prefix=""):
1620
"""Yield data about all the directories in a tree.
1622
This yields the same information as walkdirs() only each entry is yielded
1623
in utf-8. On platforms which have a filesystem encoding of utf8 the paths
1624
are returned as exact byte-strings.
1626
:return: yields a tuple of (dir_info, [file_info])
1627
dir_info is (utf8_relpath, path-from-top)
1628
file_info is (utf8_relpath, utf8_name, kind, lstat, path-from-top)
1629
if top is an absolute path, path-from-top is also an absolute path.
1630
path-from-top might be unicode or utf8, but it is the correct path to
1631
pass to os functions to affect the file in question. (such as os.lstat)
1633
global _selected_dir_reader
1634
if _selected_dir_reader is None:
1635
fs_encoding = _fs_enc.upper()
1636
if sys.platform == "win32" and win32utils.winver == 'Windows NT':
1637
# Win98 doesn't have unicode apis like FindFirstFileW
1638
# TODO: We possibly could support Win98 by falling back to the
1639
# original FindFirstFile, and using TCHAR instead of WCHAR,
1640
# but that gets a bit tricky, and requires custom compiling
1643
from bzrlib._walkdirs_win32 import Win32ReadDir
1644
_selected_dir_reader = Win32ReadDir()
1647
elif fs_encoding in ('UTF-8', 'US-ASCII', 'ANSI_X3.4-1968'):
1648
# ANSI_X3.4-1968 is a form of ASCII
1650
from bzrlib._readdir_pyx import UTF8DirReader
1651
_selected_dir_reader = UTF8DirReader()
1652
except ImportError, e:
1653
failed_to_load_extension(e)
1656
if _selected_dir_reader is None:
1657
# Fallback to the python version
1658
_selected_dir_reader = UnicodeDirReader()
1660
# 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
1661
    # But we don't actually use 1-3 in pending, so set them to None
1662
pending = [[_selected_dir_reader.top_prefix_to_starting_dir(top, prefix)]]
1663
read_dir = _selected_dir_reader.read_dir
1664
_directory = _directory_kind
1666
relroot, _, _, _, top = pending[-1].pop()
1669
dirblock = sorted(read_dir(relroot, top))
1670
yield (relroot, top), dirblock
1671
# push the user specified dirs from dirblock
1672
next = [d for d in reversed(dirblock) if d[2] == _directory]
1674
pending.append(next)
1677
class UnicodeDirReader(DirReader):
1678
"""A dir reader for non-utf8 file systems, which transcodes."""
1680
__slots__ = ['_utf8_encode']
1683
self._utf8_encode = codecs.getencoder('utf8')
1685
def top_prefix_to_starting_dir(self, top, prefix=""):
1686
"""See DirReader.top_prefix_to_starting_dir."""
1687
return (safe_utf8(prefix), None, None, None, safe_unicode(top))
1689
def read_dir(self, prefix, top):
1690
"""Read a single directory from a non-utf8 file system.
1692
top, and the abspath element in the output are unicode, all other paths
1693
are utf8. Local disk IO is done via unicode calls to listdir etc.
1695
This is currently the fallback code path when the filesystem encoding is
1696
not UTF-8. It may be better to implement an alternative so that we can
1697
safely handle paths that are not properly decodable in the current
1700
See DirReader.read_dir for details.
1702
_utf8_encode = self._utf8_encode
1704
_listdir = os.listdir
1705
_kind_from_mode = file_kind_from_stat_mode
1708
relprefix = prefix + '/'
1711
top_slash = top + u'/'
1714
append = dirblock.append
1715
for name in sorted(_listdir(top)):
1717
name_utf8 = _utf8_encode(name)[0]
1718
except UnicodeDecodeError:
1719
raise errors.BadFilenameEncoding(
1720
_utf8_encode(relprefix)[0] + name, _fs_enc)
1721
abspath = top_slash + name
1722
statvalue = _lstat(abspath)
1723
kind = _kind_from_mode(statvalue.st_mode)
1724
append((relprefix + name_utf8, name_utf8, kind, statvalue, abspath))
1728
def copy_tree(from_path, to_path, handlers={}):
1729
"""Copy all of the entries in from_path into to_path.
1731
:param from_path: The base directory to copy.
1732
:param to_path: The target directory. If it does not exist, it will
1734
:param handlers: A dictionary of functions, which takes a source and
1735
destinations for files, directories, etc.
1736
It is keyed on the file kind, such as 'directory', 'symlink', or 'file'
1737
'file', 'directory', and 'symlink' should always exist.
1738
If they are missing, they will be replaced with 'os.mkdir()',
1739
'os.readlink() + os.symlink()', and 'shutil.copy2()', respectively.
1741
# Now, just copy the existing cached tree to the new location
1742
# We use a cheap trick here.
1743
# Absolute paths are prefixed with the first parameter
1744
# relative paths are prefixed with the second.
1745
# So we can get both the source and target returned
1746
# without any extra work.
1748
def copy_dir(source, dest):
1751
def copy_link(source, dest):
1752
"""Copy the contents of a symlink"""
1753
link_to = os.readlink(source)
1754
os.symlink(link_to, dest)
1756
real_handlers = {'file':shutil.copy2,
1757
'symlink':copy_link,
1758
'directory':copy_dir,
1760
real_handlers.update(handlers)
1762
if not os.path.exists(to_path):
1763
real_handlers['directory'](from_path, to_path)
1765
for dir_info, entries in walkdirs(from_path, prefix=to_path):
1766
for relpath, name, kind, st, abspath in entries:
1767
real_handlers[kind](abspath, relpath)
1770
def path_prefix_key(path):
1771
"""Generate a prefix-order path key for path.
1773
This can be used to sort paths in the same way that walkdirs does.
1775
    return (dirname(path), path)
1778
def compare_paths_prefix_order(path_a, path_b):
1779
"""Compare path_a and path_b to generate the same order walkdirs uses."""
1780
key_a = path_prefix_key(path_a)
1781
key_b = path_prefix_key(path_b)
1782
return cmp(key_a, key_b)
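# Example (editorial sketch, not part of the original module): sorting by
# path_prefix_key groups a directory's contents after its siblings, the same
# order walkdirs() yields, unlike a plain lexicographic sort:
#
#   >>> sorted(['a/b', 'a0', 'a'], key=path_prefix_key)
#   ['a', 'a0', 'a/b']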
1785
_cached_user_encoding = None
1788
def get_user_encoding(use_cache=True):
1789
"""Find out what the preferred user encoding is.
1791
This is generally the encoding that is used for command line parameters
1792
and file contents. This may be different from the terminal encoding
1793
or the filesystem encoding.
1795
:param use_cache: Enable cache for detected encoding.
1796
(This parameter is turned on by default,
1797
and required only for selftesting)
1799
:return: A string defining the preferred user encoding
1801
global _cached_user_encoding
1802
if _cached_user_encoding is not None and use_cache:
1803
return _cached_user_encoding
1805
if sys.platform == 'darwin':
1806
        # python locale.getpreferredencoding() always returns
        # 'mac-roman' on darwin. That's a lie.
1808
sys.platform = 'posix'
1810
if os.environ.get('LANG', None) is None:
1811
# If LANG is not set, we end up with 'ascii', which is bad
1812
# ('mac-roman' is more than ascii), so we set a default which
1813
# will give us UTF-8 (which appears to work in all cases on
1814
# OSX). Users are still free to override LANG of course, as
1815
                # long as it gives us something meaningful. This work-around
1816
# *may* not be needed with python 3k and/or OSX 10.5, but will
1817
# work with them too -- vila 20080908
1818
os.environ['LANG'] = 'en_US.UTF-8'
1821
sys.platform = 'darwin'
1826
user_encoding = locale.getpreferredencoding()
1827
except locale.Error, e:
1828
sys.stderr.write('bzr: warning: %s\n'
1829
' Could not determine what text encoding to use.\n'
1830
' This error usually means your Python interpreter\n'
1831
' doesn\'t support the locale set by $LANG (%s)\n'
1832
" Continuing with ascii encoding.\n"
1833
% (e, os.environ.get('LANG')))
1834
user_encoding = 'ascii'
1836
# Windows returns 'cp0' to indicate there is no code page. So we'll just
1837
# treat that as ASCII, and not support printing unicode characters to the
1840
# For python scripts run under vim, we get '', so also treat that as ASCII
1841
if user_encoding in (None, 'cp0', ''):
1842
user_encoding = 'ascii'
1846
codecs.lookup(user_encoding)
1848
sys.stderr.write('bzr: warning:'
1849
' unknown encoding %s.'
1850
' Continuing with ascii encoding.\n'
1853
user_encoding = 'ascii'
1856
_cached_user_encoding = user_encoding
1858
return user_encoding
1861
def get_host_name():
1862
"""Return the current unicode host name.
1864
This is meant to be used in place of socket.gethostname() because that
1865
behaves inconsistently on different platforms.
1867
if sys.platform == "win32":
1869
return win32utils.get_host_name()
1872
return socket.gethostname().decode(get_user_encoding())
1875
def recv_all(socket, bytes):
1876
"""Receive an exact number of bytes.
1878
Regular Socket.recv() may return less than the requested number of bytes,
1879
    depending on what's in the OS buffer. MSG_WAITALL is not available
1880
on all platforms, but this should work everywhere. This will return
1881
less than the requested amount if the remote end closes.
1883
This isn't optimized and is intended mostly for use in testing.
1886
while len(b) < bytes:
1887
new = until_no_eintr(socket.recv, bytes - len(b))
1894
def send_all(socket, bytes, report_activity=None):
1895
"""Send all bytes on a socket.
1897
Regular socket.sendall() can give socket error 10053 on Windows. This
1898
implementation sends no more than 64k at a time, which avoids this problem.
1900
:param report_activity: Call this as bytes are read, see
1901
Transport._report_activity
1904
for pos in xrange(0, len(bytes), chunk_size):
1905
block = bytes[pos:pos+chunk_size]
1906
if report_activity is not None:
1907
report_activity(len(block), 'write')
1908
until_no_eintr(socket.sendall, block)
1911
def dereference_path(path):
1912
"""Determine the real path to a file.
1914
All parent elements are dereferenced. But the file itself is not
1916
:param path: The original path. May be absolute or relative.
1917
:return: the real path *to* the file
1919
parent, base = os.path.split(path)
1920
# The pathjoin for '.' is a workaround for Python bug #1213894.
1921
# (initial path components aren't dereferenced)
1922
return pathjoin(realpath(pathjoin('.', parent)), base)
1925
def supports_mapi():
1926
"""Return True if we can use MAPI to launch a mail client."""
1927
return sys.platform == "win32"
1930
def resource_string(package, resource_name):
1931
"""Load a resource from a package and return it as a string.
1933
Note: Only packages that start with bzrlib are currently supported.
1935
This is designed to be a lightweight implementation of resource
1936
loading in a way which is API compatible with the same API from
1938
http://peak.telecommunity.com/DevCenter/PkgResources#basic-resource-access.
1939
If and when pkg_resources becomes a standard library, this routine
1942
# Check package name is within bzrlib
1943
if package == "bzrlib":
1944
resource_relpath = resource_name
1945
elif package.startswith("bzrlib."):
1946
package = package[len("bzrlib."):].replace('.', os.sep)
1947
resource_relpath = pathjoin(package, resource_name)
1949
raise errors.BzrError('resource package %s not in bzrlib' % package)
1951
# Map the resource to a file and read its contents
1952
base = dirname(bzrlib.__file__)
1953
if getattr(sys, 'frozen', None): # bzr.exe
1954
base = abspath(pathjoin(base, '..', '..'))
1955
filename = pathjoin(base, resource_relpath)
1956
return open(filename, 'rU').read()
1959
def file_kind_from_stat_mode_thunk(mode):
1960
global file_kind_from_stat_mode
1961
if file_kind_from_stat_mode is file_kind_from_stat_mode_thunk:
1963
from bzrlib._readdir_pyx import UTF8DirReader
1964
file_kind_from_stat_mode = UTF8DirReader().kind_from_mode
1965
except ImportError, e:
1966
# This is one time where we won't warn that an extension failed to
1967
# load. The extension is never available on Windows anyway.
1968
from bzrlib._readdir_py import (
1969
_kind_from_mode as file_kind_from_stat_mode
1971
return file_kind_from_stat_mode(mode)
1972
file_kind_from_stat_mode = file_kind_from_stat_mode_thunk
1975
def file_kind(f, _lstat=os.lstat):
1977
return file_kind_from_stat_mode(_lstat(f).st_mode)
1979
if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
1980
raise errors.NoSuchFile(f)
1984
def until_no_eintr(f, *a, **kw):
1985
"""Run f(*a, **kw), retrying if an EINTR error occurs."""
1986
# Borrowed from Twisted's twisted.python.util.untilConcludes function.
1990
except (IOError, OSError), e:
1991
if e.errno == errno.EINTR:
1995
def re_compile_checked(re_string, flags=0, where=""):
1996
"""Return a compiled re, or raise a sensible error.
1998
This should only be used when compiling user-supplied REs.
2000
:param re_string: Text form of regular expression.
2001
:param flags: eg re.IGNORECASE
2002
:param where: Message explaining to the user the context where
2003
it occurred, eg 'log search filter'.
2005
# from https://bugs.launchpad.net/bzr/+bug/251352
2007
re_obj = re.compile(re_string, flags)
2012
where = ' in ' + where
2013
# despite the name 'error' is a type
2014
raise errors.BzrCommandError('Invalid regular expression%s: %r: %s'
2015
% (where, re_string, e))
2018
if sys.platform == "win32":
2021
return msvcrt.getch()
2026
fd = sys.stdin.fileno()
2027
settings = termios.tcgetattr(fd)
2030
ch = sys.stdin.read(1)
2032
termios.tcsetattr(fd, termios.TCSADRAIN, settings)
2036
if sys.platform == 'linux2':
2037
def _local_concurrency():
2039
prefix = 'processor'
2040
for line in file('/proc/cpuinfo', 'rb'):
2041
if line.startswith(prefix):
2042
concurrency = int(line[line.find(':')+1:]) + 1
2044
elif sys.platform == 'darwin':
2045
def _local_concurrency():
2046
return subprocess.Popen(['sysctl', '-n', 'hw.availcpu'],
2047
stdout=subprocess.PIPE).communicate()[0]
2048
elif sys.platform[0:7] == 'freebsd':
2049
def _local_concurrency():
2050
return subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
2051
stdout=subprocess.PIPE).communicate()[0]
2052
elif sys.platform == 'sunos5':
2053
def _local_concurrency():
2054
return subprocess.Popen(['psrinfo', '-p',],
2055
stdout=subprocess.PIPE).communicate()[0]
2056
elif sys.platform == "win32":
2057
def _local_concurrency():
2058
# This appears to return the number of cores.
2059
return os.environ.get('NUMBER_OF_PROCESSORS')
2061
def _local_concurrency():
2066
_cached_local_concurrency = None
2068
def local_concurrency(use_cache=True):
2069
"""Return how many processes can be run concurrently.
2071
Rely on platform specific implementations and default to 1 (one) if
2072
anything goes wrong.
2074
global _cached_local_concurrency
2076
if _cached_local_concurrency is not None and use_cache:
2077
return _cached_local_concurrency
2079
concurrency = os.environ.get('BZR_CONCURRENCY', None)
2080
if concurrency is None:
2082
concurrency = _local_concurrency()
2083
except (OSError, IOError):
2086
concurrency = int(concurrency)
2087
except (TypeError, ValueError):
2090
_cached_concurrency = concurrency
2094
class UnicodeOrBytesToBytesWriter(codecs.StreamWriter):
2095
"""A stream writer that doesn't decode str arguments."""
2097
def __init__(self, encode, stream, errors='strict'):
2098
codecs.StreamWriter.__init__(self, stream, errors)
2099
self.encode = encode
2101
def write(self, object):
2102
if type(object) is str:
2103
self.stream.write(object)
2105
data, _ = self.encode(object, self.errors)
2106
self.stream.write(data)