# Copyright (C) 2005, 2006, 2007, 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from stat import (S_ISREG, S_ISDIR, S_ISLNK, ST_MODE, ST_SIZE,
                  S_ISCHR, S_ISBLK, S_ISFIFO, S_ISSOCK)

from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
from datetime import datetime

from ntpath import (abspath as _nt_abspath,
                    normpath as _nt_normpath,
                    realpath as _nt_realpath,
                    splitdrive as _nt_splitdrive,

from tempfile import (

# sha and md5 modules are deprecated in python2.6 but hashlib is available as
# of python2.5
if sys.version_info < (2, 5):
    import md5 as _mod_md5
    import sha as _mod_sha

from bzrlib import symbol_versioning

timer_func = time.time
if sys.platform == 'win32':
    timer_func = time.clock

# On win32, O_BINARY is used to indicate the file should
# be opened in binary mode, rather than text mode.
# On other platforms, O_BINARY doesn't exist, because
# they always open in binary mode, so it is okay to
# OR with 0 on those platforms
O_BINARY = getattr(os, 'O_BINARY', 0)


def get_unicode_argv():
    try:
        user_encoding = get_user_encoding()
        return [a.decode(user_encoding) for a in sys.argv[1:]]
    except UnicodeDecodeError:
        raise errors.BzrError(("Parameter '%r' is unsupported by the current "


def make_readonly(filename):
    """Make a filename read-only."""
    mod = os.lstat(filename).st_mode
    if not stat.S_ISLNK(mod):
        mod = mod & 0777555
        os.chmod(filename, mod)


def make_writable(filename):
    mod = os.lstat(filename).st_mode
    if not stat.S_ISLNK(mod):
        mod = mod | 0200
        os.chmod(filename, mod)


def minimum_path_selection(paths):
    """Return the smallest subset of paths which are outside paths.

    :param paths: A container (and hence not None) of paths.
    :return: A set of paths sufficient to include everything in paths via
        is_inside, drawn from the paths parameter.
    """
    def sort_key(path):
        return path.split('/')
    sorted_paths = sorted(list(paths), key=sort_key)

    search_paths = [sorted_paths[0]]
    for path in sorted_paths[1:]:
        if not is_inside(search_paths[-1], path):
            # This path is unique, add it
            search_paths.append(path)

    return set(search_paths)
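
# Illustrative example (not part of the original module): overlapping paths
# collapse to their outermost entries.
#
#   >>> minimum_path_selection(['a/b', 'a', 'c'])
#   set(['a', 'c'])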


def quotefn(f):
    """Return a quoted filename

    This previously used backslash quoting, but that works poorly on
    Windows."""
    # TODO: I'm not really sure this is the best format either.
    if _QUOTE_RE is None:
        _QUOTE_RE = re.compile(r'([^a-zA-Z0-9.,:/\\_~-])')

    if _QUOTE_RE.search(f):


_directory_kind = 'directory'


def get_umask():
    """Return the current umask"""
    # Assume that people aren't messing with the umask while running
    # XXX: This is not thread safe, but there is no way to get the
    #      umask without setting it
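    # A minimal sketch of the set-then-restore approach the comment above
    # describes (added for illustration; assumed, not verbatim from the
    # original module): set a throwaway umask, capture the previous value,
    # then put it back.
    umask = os.umask(0)
    os.umask(umask)
    return umask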


_kind_marker_map = {
    _directory_kind: '/',
    'tree-reference': '+',
}


def kind_marker(kind):
    try:
        return _kind_marker_map[kind]
    except KeyError:
        raise errors.BzrError('invalid file kind %r' % kind)


lexists = getattr(os.path, 'lexists', None)

stat = getattr(os, 'lstat', os.stat)
if e.errno == errno.ENOENT:
    raise errors.BzrError("lstat/stat of (%r): %r" % (f, e))


def fancy_rename(old, new, rename_func, unlink_func):
    """A fancy rename, when you don't have atomic rename.

    :param old: The old path, to rename from
    :param new: The new path, to rename to
    :param rename_func: The potentially non-atomic rename function
    :param unlink_func: A way to delete the target file if the full rename
        succeeds
    """
    # sftp rename doesn't allow overwriting, so play tricks:
    base = os.path.basename(new)
    dirname = os.path.dirname(new)
    tmp_name = u'tmp.%s.%.9f.%d.%s' % (base, time.time(), os.getpid(), rand_chars(10))
    tmp_name = pathjoin(dirname, tmp_name)

    # Rename the file out of the way, but keep track if it didn't exist
    # We don't want to grab just any exception
    # something like EACCES should prevent us from continuing
    # The downside is that the rename_func has to throw an exception
    # with an errno = ENOENT, or NoSuchFile
    try:
        rename_func(new, tmp_name)
    except (errors.NoSuchFile,), e:
        # RBC 20060103 abstraction leakage: the paramiko SFTP client's rename
        # function raises an IOError with an errno of None when a rename fails.
        # This then gets caught here.
        if e.errno not in (None, errno.ENOENT, errno.ENOTDIR):
            raise
        if (getattr(e, 'errno', None) is None
                or e.errno not in (errno.ENOENT, errno.ENOTDIR)):
            raise

    try:
        # This may throw an exception, in which case success will
        # remain False
        rename_func(old, new)
    except (IOError, OSError), e:
        # source and target may be aliases of each other (e.g. on a
        # case-insensitive filesystem), so we may have accidentally renamed
        # source when we tried to rename target
        failure_exc = sys.exc_info()
        if (file_existed and e.errno in (None, errno.ENOENT)
                and old.lower() == new.lower()):
            # source and target are the same file on a case-insensitive
            # filesystem, so we don't generate an exception
            failure_exc = None

    # If the file used to exist, rename it back into place
    # otherwise just delete it from the tmp location
    unlink_func(tmp_name)
    rename_func(tmp_name, new)
    if failure_exc is not None:
        raise failure_exc[0], failure_exc[1], failure_exc[2]


# In Python 2.4.2 and older, os.path.abspath and os.path.realpath
# choke on a Unicode string containing a relative path if
# os.getcwd() returns a non-sys.getdefaultencoding()-encoded string.
_fs_enc = sys.getfilesystemencoding() or 'utf-8'


def _posix_abspath(path):
    # jam 20060426 rather than encoding to fsencoding
    # copy posixpath.abspath, but use os.getcwdu instead
    if not posixpath.isabs(path):
        path = posixpath.join(getcwd(), path)
    return posixpath.normpath(path)


def _posix_realpath(path):
    return posixpath.realpath(path.encode(_fs_enc)).decode(_fs_enc)


def _win32_fixdrive(path):
    """Force drive letters to be consistent.

    win32 is inconsistent whether it returns lower or upper case
    and even if it was consistent the user might type the other
    so we force it to uppercase
    running python.exe under cmd.exe returns capital C:\\
    running win32 python inside a cygwin shell returns lowercase c:\\
    """
    drive, path = _nt_splitdrive(path)
    return drive.upper() + path


def _win32_abspath(path):
    # Real _nt_abspath doesn't have a problem with a unicode cwd
    return _win32_fixdrive(_nt_abspath(unicode(path)).replace('\\', '/'))


def _win98_abspath(path):
    """Return the absolute version of a path.

    Windows 98 safe implementation (python reimplementation
    of Win32 API function GetFullPathNameW)
    """
    # \\HOST\path => //HOST/path
    # //HOST/path => //HOST/path
    # path        => C:/cwd/path
    # check for absolute path
    drive = _nt_splitdrive(path)[0]
    if drive == '' and path[:2] not in ('//', '\\\\'):
        cwd = os.getcwdu()
        # we cannot simply os.path.join cwd and path
        # because os.path.join('C:', '/path') produces '/path'
        # and this is incorrect
        if path[:1] in ('/', '\\'):
            cwd = _nt_splitdrive(cwd)[0]
        path = cwd + '\\' + path
    return _win32_fixdrive(_nt_normpath(path).replace('\\', '/'))


def _win32_realpath(path):
    # Real _nt_realpath doesn't have a problem with a unicode cwd
    return _win32_fixdrive(_nt_realpath(unicode(path)).replace('\\', '/'))


def _win32_pathjoin(*args):
    return _nt_join(*args).replace('\\', '/')


def _win32_normpath(path):
    return _win32_fixdrive(_nt_normpath(unicode(path)).replace('\\', '/'))


def _win32_getcwd():
    return _win32_fixdrive(os.getcwdu().replace('\\', '/'))


def _win32_mkdtemp(*args, **kwargs):
    return _win32_fixdrive(tempfile.mkdtemp(*args, **kwargs).replace('\\', '/'))


def _win32_rename(old, new):
    """We expect to be able to atomically replace 'new' with old.

    On win32, if new exists, it must be moved out of the way first,
    and then deleted.
    """
    try:
        fancy_rename(old, new, rename_func=os.rename, unlink_func=os.unlink)
    except OSError, e:
        if e.errno in (errno.EPERM, errno.EACCES, errno.EBUSY, errno.EINVAL):
            # If we try to rename a non-existent file onto cwd, we get
            # EPERM or EACCES instead of ENOENT, this will raise ENOENT
            # if the old path doesn't exist, sometimes we get EACCES
            # On Linux, we seem to get EBUSY, on Mac we get EINVAL
            os.lstat(old)
            raise


def _mac_getcwd():
    return unicodedata.normalize('NFC', os.getcwdu())


# Default is to just use the python builtins, but these can be rebound on
# particular platforms.
abspath = _posix_abspath
realpath = _posix_realpath
pathjoin = os.path.join
normpath = os.path.normpath
dirname = os.path.dirname
basename = os.path.basename
split = os.path.split
splitext = os.path.splitext
# These were already imported into local scope
# mkdtemp = tempfile.mkdtemp
# rmtree = shutil.rmtree

MIN_ABS_PATHLENGTH = 1


if sys.platform == 'win32':
    if win32utils.winver == 'Windows 98':
        abspath = _win98_abspath
    else:
        abspath = _win32_abspath
    realpath = _win32_realpath
    pathjoin = _win32_pathjoin
    normpath = _win32_normpath
    getcwd = _win32_getcwd
    mkdtemp = _win32_mkdtemp
    rename = _win32_rename

    MIN_ABS_PATHLENGTH = 3

    def _win32_delete_readonly(function, path, excinfo):
        """Error handler for shutil.rmtree function [for win32]
        Helps to remove files and dirs marked as read-only.
        """
        exception = excinfo[1]
        if function in (os.remove, os.rmdir) \
            and isinstance(exception, OSError) \
            and exception.errno == errno.EACCES:
            make_writable(path)
            function(path)

    def rmtree(path, ignore_errors=False, onerror=_win32_delete_readonly):
        """Replacer for shutil.rmtree: could remove readonly dirs/files"""
        return shutil.rmtree(path, ignore_errors, onerror)

    f = win32utils.get_unicode_argv     # special function or None
elif sys.platform == 'darwin':
    getcwd = _mac_getcwd


def get_terminal_encoding():
    """Find the best encoding for printing to the screen.

    This attempts to check both sys.stdout and sys.stdin to see
    what encoding they are in, and if that fails it falls back to
    osutils.get_user_encoding().

    The problem is that on Windows, locale.getpreferredencoding()
    is not the same encoding as that used by the console:
    http://mail.python.org/pipermail/python-list/2003-May/162357.html

    On my standard US Windows XP, the preferred encoding is
    cp1252, but the console is cp437
    """
    from bzrlib.trace import mutter
    output_encoding = getattr(sys.stdout, 'encoding', None)
    if not output_encoding:
        input_encoding = getattr(sys.stdin, 'encoding', None)
        if not input_encoding:
            output_encoding = get_user_encoding()
            mutter('encoding stdout as osutils.get_user_encoding() %r',
                   output_encoding)
        else:
            output_encoding = input_encoding
            mutter('encoding stdout as sys.stdin encoding %r', output_encoding)
    else:
        mutter('encoding stdout as sys.stdout encoding %r', output_encoding)
    if output_encoding == 'cp0':
        # invalid encoding (cp0 means 'no codepage' on Windows)
        output_encoding = get_user_encoding()
        mutter('cp0 is invalid encoding.'
               ' encoding stdout as osutils.get_user_encoding() %r',
               output_encoding)
    try:
        codecs.lookup(output_encoding)
    except LookupError:
        sys.stderr.write('bzr: warning:'
                         ' unknown terminal encoding %s.\n'
                         ' Using encoding %s instead.\n'
                         % (output_encoding, get_user_encoding())
                        )
        output_encoding = get_user_encoding()

    return output_encoding


def normalizepath(f):
    if getattr(os.path, 'realpath', None) is not None:
        F = realpath
    else:
        F = abspath
    [p, e] = os.path.split(f)
    if e == "" or e == "." or e == "..":
        return F(p)
    return pathjoin(F(p), e)


def isdir(f):
    """True if f is an accessible directory."""
    return S_ISDIR(os.lstat(f)[ST_MODE])


def isfile(f):
    """True if f is a regular file."""
    return S_ISREG(os.lstat(f)[ST_MODE])


def islink(f):
    """True if f is a symlink."""
    return S_ISLNK(os.lstat(f)[ST_MODE])


def is_inside(dir, fname):
    """True if fname is inside dir.

    The parameters should typically be passed to osutils.normpath first, so
    that . and .. and repeated slashes are eliminated, and the separators
    are canonical for the platform.

    The empty string as a dir name is taken as top-of-tree and matches
    everything.
    """
    # XXX: Most callers of this can actually do something smarter by
    # looking at the inventory
    return fname.startswith(dir)


def is_inside_any(dir_list, fname):
    """True if fname is inside any of given dirs."""
    for dirname in dir_list:
        if is_inside(dirname, fname):
            return True
    return False


def is_inside_or_parent_of_any(dir_list, fname):
    """True if fname is a child or a parent of any of the given files."""
    for dirname in dir_list:
        if is_inside(dirname, fname) or is_inside(fname, dirname):
            return True
    return False
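

# Illustrative examples (not from the original source), assuming normalized
# '/'-separated paths:
#
#   is_inside('src', 'src/foo.c')            -> True
#   is_inside_any(['doc', 'src'], 'src/a')   -> True
#   is_inside_any(['doc', 'tools'], 'src/a') -> False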


def pumpfile(from_file, to_file, read_length=-1, buff_size=32768,
             report_activity=None, direction='read'):
    """Copy contents of one file to another.

    The read_length can either be -1 to read to end-of-file (EOF) or
    it can specify the maximum number of bytes to read.

    The buff_size represents the maximum size for each read operation
    performed on from_file.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    :param direction: Will be passed to report_activity

    :return: The number of bytes copied.
    """
    # read specified number of bytes
    while read_length > 0:
        num_bytes_to_read = min(read_length, buff_size)
        block = from_file.read(num_bytes_to_read)
        if report_activity is not None:
            report_activity(len(block), direction)

        actual_bytes_read = len(block)
        read_length -= actual_bytes_read
        length += actual_bytes_read

    block = from_file.read(buff_size)
    if report_activity is not None:
        report_activity(len(block), direction)
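
# Illustrative usage (hypothetical file names, not from the original source):
# copy at most 1 MiB between two open files in 32 KiB reads.
#
#   src = open('input.bin', 'rb')
#   dst = open('output.bin', 'wb')
#   copied = pumpfile(src, dst, read_length=1024 * 1024)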


def pump_string_file(bytes, file_handle, segment_size=None):
    """Write bytes to file_handle in many smaller writes.

    :param bytes: The string to write.
    :param file_handle: The file to write to.
    """
    # Write data in chunks rather than all at once, because very large
    # writes fail on some platforms (e.g. Windows with SMB mounted
    # drives).
    if not segment_size:
        segment_size = 5242880 # 5MB
    segments = range(len(bytes) / segment_size + 1)
    write = file_handle.write
    for segment_index in segments:
        segment = buffer(bytes, segment_index * segment_size, segment_size)
        write(segment)


def file_iterator(input_file, readsize=32768):
    b = input_file.read(readsize)


def sha_file(f):
    """Calculate the hexdigest of an open file.

    The file cursor should be already at the start.
    """


def size_sha_file(f):
    """Calculate the size and hexdigest of an open file.

    The file cursor should be already at the start and
    the caller is responsible for closing the file afterwards.
    """
    return size, s.hexdigest()


def sha_file_by_name(fname):
    """Calculate the SHA1 of a file by reading the full text"""
    f = os.open(fname, os.O_RDONLY | O_BINARY)
    b = os.read(f, 1<<16)


def sha_strings(strings, _factory=sha):
    """Return the sha-1 of concatenation of strings"""
    map(s.update, strings)


def sha_string(f, _factory=sha):
    return _factory(f).hexdigest()
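
# Illustrative note (not from the original source): hashing several fragments
# with sha_strings is equivalent to hashing their concatenation, e.g.
#   sha_strings(['foo', 'bar']) == sha_string('foobar')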


def fingerprint_file(f):
    return {'size': len(b),
            'sha1': sha(b).hexdigest()}


def compare_files(a, b):
    """Returns true if equal in contents"""


def local_time_offset(t=None):
    """Return offset of local zone from GMT, either at present or at time t."""
    offset = datetime.fromtimestamp(t) - datetime.utcfromtimestamp(t)
    return offset.days * 86400 + offset.seconds


weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_default_format_by_weekday_num = [wd + " %Y-%m-%d %H:%M:%S" for wd in weekdays]


def format_date(t, offset=0, timezone='original', date_fmt=None,
                show_offset=True):
    """Return a formatted date string.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_fmt = date_fmt.replace('%a', weekdays[tt[6]])
    date_str = time.strftime(date_fmt, tt)
    return date_str + offset_str


# Cache of formatted offset strings
_offset_cache = {}


def format_date_with_offset_in_original_timezone(t, offset=0,
                                                 _cache=_offset_cache):
    """Return a formatted date string in the original timezone.

    This routine may be faster than format_date.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    """
    tt = time.gmtime(t + offset)
    date_fmt = _default_format_by_weekday_num[tt[6]]
    date_str = time.strftime(date_fmt, tt)
    offset_str = _cache.get(offset, None)
    if offset_str is None:
        offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
        _cache[offset] = offset_str
    return date_str + offset_str


def format_local_date(t, offset=0, timezone='original', date_fmt=None,
                      show_offset=True):
    """Return a unicode date string formatted according to the current locale.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_str = time.strftime(date_fmt, tt)
    if not isinstance(date_str, unicode):
        date_str = date_str.decode(get_user_encoding(), 'replace')
    return date_str + offset_str


def _format_date(t, offset, timezone, date_fmt, show_offset):
    if timezone == 'utc':
        tt = time.gmtime(t)
        offset = 0
    elif timezone == 'original':
        tt = time.gmtime(t + offset)
    elif timezone == 'local':
        tt = time.localtime(t)
        offset = local_time_offset(t)
    else:
        raise errors.UnsupportedTimezoneFormat(timezone)
    if date_fmt is None:
        date_fmt = "%a %Y-%m-%d %H:%M:%S"
    if show_offset:
        offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
    else:
        offset_str = ''
    return (date_fmt, tt, offset_str)
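
# Illustrative example (not from the original source): with the default
# format, a timestamp of 0 shown in a +01:00 timezone comes out as
#   format_date(0, offset=3600, timezone='original')
#   -> 'Thu 1970-01-01 01:00:00 +0100'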


def compact_date(when):
    return time.strftime('%Y%m%d%H%M%S', time.gmtime(when))


def format_delta(delta):
    """Get a nice looking string for a time delta.

    :param delta: The time difference in seconds, can be positive or negative.
        positive indicates time in the past, negative indicates time in the
        future. (usually time.time() - stored_time)
    :return: String formatted to show approximate resolution
    """
    direction = 'in the future'

    if seconds < 90: # print seconds up to 90 seconds
        if seconds == 1:
            return '%d second %s' % (seconds, direction,)
        else:
            return '%d seconds %s' % (seconds, direction)

    minutes = int(seconds / 60)
    seconds -= 60 * minutes
    if minutes < 90: # print minutes, seconds up to 90 minutes
        if minutes == 1:
            return '%d minute, %d second%s %s' % (
                minutes, seconds, plural_seconds, direction)
        else:
            return '%d minutes, %d second%s %s' % (
                minutes, seconds, plural_seconds, direction)

    hours = int(minutes / 60)
    minutes -= 60 * hours
    if hours == 1:
        return '%d hour, %d minute%s %s' % (hours, minutes,
                                            plural_minutes, direction)
    return '%d hours, %d minute%s %s' % (hours, minutes,
                                         plural_minutes, direction)
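
# Illustrative examples (not from the original source):
#   format_delta(45)    -> '45 seconds ago'
#   format_delta(-45)   -> '45 seconds in the future'
#   format_delta(5400)  -> '1 hour, 30 minutes ago'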


def fsize(f):
    """Return size of given open file."""
    return os.fstat(f.fileno())[ST_SIZE]


# Define rand_bytes based on platform.
try:
    # Python 2.4 and later have os.urandom,
    # but it doesn't work on some arches
    rand_bytes = os.urandom
except (NotImplementedError, AttributeError):
    # If python doesn't have os.urandom, or it doesn't work,
    # then try to first pull random data from /dev/urandom
    try:
        rand_bytes = file('/dev/urandom', 'rb').read
    # Otherwise, use this hack as a last resort
    except (IOError, OSError):
        # not well seeded, but better than nothing
        s += chr(random.randint(0, 255))


ALNUM = '0123456789abcdefghijklmnopqrstuvwxyz'


def rand_chars(num):
    """Return a random string of num alphanumeric characters

    The result only contains lowercase chars because it may be used on
    case-insensitive filesystems.
    """
    s = ''
    for raw_byte in rand_bytes(num):
        s += ALNUM[ord(raw_byte) % 36]
    return s


## TODO: We could later have path objects that remember their list
## decomposition (might be too tricksy though.)

def splitpath(p):
    """Turn string into list of parts."""
    # split on either delimiter because people might use either on
    # windows
    ps = re.split(r'[\\/]', p)
    for f in ps:
        if f == '..':
            raise errors.BzrError("sorry, %r not allowed in path" % f)
        elif (f == '.') or (f == ''):
            pass


def joinpath(p):
    for f in p:
        if (f == '..') or (f is None) or (f == ''):
            raise errors.BzrError("sorry, %r not allowed in path" % f)
    return pathjoin(*p)


def parent_directories(filename):
    """Return the list of parent directories, deepest first.

    For example, parent_directories("a/b/c") -> ["a/b", "a"].
    """
    parents = []
    parts = splitpath(dirname(filename))
    while parts:
        parents.append(joinpath(parts))
        parts.pop()
    return parents


_extension_load_failures = []


def failed_to_load_extension(exception):
    """Handle failing to load a binary extension.

    This should be called from the ImportError block guarding the attempt to
    import the native extension.  If this function returns, the pure-Python
    implementation should be loaded instead::

    >>> try:
    >>>     import bzrlib._fictional_extension_pyx
    >>> except ImportError, e:
    >>>     bzrlib.osutils.failed_to_load_extension(e)
    >>>     import bzrlib._fictional_extension_py
    """
    # NB: This docstring is just an example, not a doctest, because doctest
    # currently can't cope with the use of lazy imports in this namespace --
    # This currently doesn't report the failure at the time it occurs, because
    # they tend to happen very early in startup when we can't check config
    # files etc, and also we want to report all failures but not spam the user
    from bzrlib import trace
    exception_str = str(exception)
    if exception_str not in _extension_load_failures:
        trace.mutter("failed to load compiled extension: %s" % exception_str)
        _extension_load_failures.append(exception_str)


def report_extension_load_failures():
    if not _extension_load_failures:
        return
    from bzrlib.config import GlobalConfig
    if GlobalConfig().get_user_option_as_bool('ignore_missing_extensions'):
        return
    # the warnings framework should by default show this only once
    from bzrlib.trace import warning
    warning(
        "bzr: warning: some compiled extensions could not be loaded; "
        "see <https://answers.launchpad.net/bzr/+faq/703>")
    # we no longer show the specific missing extensions here, because it makes
    # the message too long and scary - see
    # https://bugs.launchpad.net/bzr/+bug/430529


try:
    from bzrlib._chunks_to_lines_pyx import chunks_to_lines
except ImportError, e:
    failed_to_load_extension(e)
    from bzrlib._chunks_to_lines_py import chunks_to_lines


def split_lines(s):
    """Split s into lines, but without removing the newline characters."""
    # Trivially convert a fulltext into a 'chunked' representation, and let
    # chunks_to_lines do the heavy lifting.
    if isinstance(s, str):
        # chunks_to_lines only supports 8-bit strings
        return chunks_to_lines([s])
    else:
        return _split_lines(s)


def _split_lines(s):
    """Split s into lines, but without removing the newline characters.

    This supports Unicode or plain string objects.
    """
    lines = s.split('\n')
    result = [line + '\n' for line in lines[:-1]]
    if lines[-1]:
        result.append(lines[-1])
    return result


def hardlinks_good():
    return sys.platform not in ('win32', 'cygwin', 'darwin')


def link_or_copy(src, dest):
    """Hardlink a file, or copy it if it can't be hardlinked."""
    if not hardlinks_good():
        shutil.copyfile(src, dest)
        return
    try:
        os.link(src, dest)
    except (OSError, IOError), e:
        if e.errno != errno.EXDEV:
            raise
        shutil.copyfile(src, dest)


def delete_any(path):
    """Delete a file, symlink or directory.

    Will delete even if readonly.
    """
    try:
        _delete_file_or_dir(path)
    except (OSError, IOError), e:
        if e.errno in (errno.EPERM, errno.EACCES):
            # make writable and try again
            try:
                make_writable(path)
            except (OSError, IOError):
                pass
            _delete_file_or_dir(path)


def _delete_file_or_dir(path):
    # Look Before You Leap (LBYL) is appropriate here instead of Easier to Ask
    # for Forgiveness than Permission (EAFP) because:
    # - root can damage a solaris file system by using unlink,
    # - unlink raises different exceptions on different OSes (linux: EISDIR,
    #   win32: EACCES, OSX: EPERM) when invoked on a directory.
    if isdir(path): # Takes care of symlinks
        os.rmdir(path)
    else:
        os.unlink(path)


def has_symlinks():
    if getattr(os, 'symlink', None) is not None:
        return True


def has_hardlinks():
    if getattr(os, 'link', None) is not None:
        return True


def host_os_dereferences_symlinks():
    return (has_symlinks()
            and sys.platform not in ('cygwin', 'win32'))


def readlink(abspath):
    """Return a string representing the path to which the symbolic link points.

    :param abspath: The link absolute unicode path.

    This is guaranteed to return the symbolic link in unicode in all python
    versions.
    """
    link = abspath.encode(_fs_enc)
    target = os.readlink(link)
    target = target.decode(_fs_enc)
    return target


def contains_whitespace(s):
    """True if there are any whitespace characters in s."""
    # string.whitespace can include '\xa0' in certain locales, because it is
    # considered "non-breaking-space" as part of ISO-8859-1. But it
    # 1) Isn't a breaking whitespace
    # 2) Isn't one of ' \t\r\n' which are characters we sometimes use as
    # 3) '\xa0' isn't unicode safe since it is >128.

    # This should *not* be a unicode set of characters in case the source
    # string is not a Unicode string. We can auto-up-cast the characters since
    # they are ascii, but we don't want to auto-up-cast the string in case it
    for ch in ' \t\n\r\v\f':
        if ch in s:
            return True
    return False


def contains_linebreaks(s):
    """True if there is any vertical whitespace in s."""


def relpath(base, path):
    """Return path relative to base, or raise exception.

    The path may be either an absolute path or a path relative to the
    current working directory.

    os.path.commonprefix (python2.4) has a bad bug that it works just
    on string prefixes, assuming that '/u' is a prefix of '/u2'.  This
    avoids that problem.
    """
    if len(base) < MIN_ABS_PATHLENGTH:
        # must have space for e.g. a drive letter
        raise ValueError('%r is too short to calculate a relative path'
                         % (base,))

    if len(head) <= len(base) and head != base:
        raise errors.PathNotChild(rp, base)
    head, tail = split(head)

    return pathjoin(*reversed(s))


def _cicp_canonical_relpath(base, path):
    """Return the canonical path relative to base.

    Like relpath, but on case-insensitive-case-preserving file-systems, this
    will return the relpath as stored on the file-system rather than in the
    case specified in the input string, for all existing portions of the path.

    This will cause O(N) behaviour if called for every path in a tree; if you
    have a number of paths to convert, you should use canonical_relpaths().
    """
    # TODO: it should be possible to optimize this for Windows by using the
    # win32 API FindFiles function to look for the specified name - but using
    # os.listdir() still gives us the correct, platform agnostic semantics in
    rel = relpath(base, path)
    # '.' will have been turned into ''
    abs_base = abspath(base)
    _listdir = os.listdir

    # use an explicit iterator so we can easily consume the rest on early exit.
    bit_iter = iter(rel.split('/'))
    for bit in bit_iter:
        try:
            next_entries = _listdir(current)
        except OSError: # enoent, eperm, etc
            # We can't find this in the filesystem, so just append the
            current = pathjoin(current, bit, *list(bit_iter))
        for look in next_entries:
            if lbit == look.lower():
                current = pathjoin(current, look)
        # got to the end, nothing matched, so we just return the
        # non-existing bits as they were specified (the filename may be
        # the target of a move, for example).
        current = pathjoin(current, bit, *list(bit_iter))
    return current[len(abs_base):].lstrip('/')


# XXX - TODO - we need better detection/integration of case-insensitive
# file-systems; Linux often sees FAT32 devices (or NFS-mounted OSX
# filesystems), for example, so could probably benefit from the same basic
# support there.  For now though, only Windows and OSX get that support, and
# they get it for *all* file-systems!
if sys.platform in ('win32', 'darwin'):
    canonical_relpath = _cicp_canonical_relpath
else:
    canonical_relpath = relpath


def canonical_relpaths(base, paths):
    """Create an iterable to canonicalize a sequence of relative paths.

    The intent is for this implementation to use a cache, vastly speeding
    up multiple transformations in the same directory.
    """
    # but for now, we haven't optimized...
    return [canonical_relpath(base, p) for p in paths]


def safe_unicode(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string into unicode.

    If it is unicode, it is returned.
    Otherwise it is decoded from utf-8. If decoding fails, the exception is
    wrapped in a BzrBadParameterNotUnicode exception.
    """
    if isinstance(unicode_or_utf8_string, unicode):
        return unicode_or_utf8_string
    try:
        return unicode_or_utf8_string.decode('utf8')
    except UnicodeDecodeError:
        raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)


def safe_utf8(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string to a utf8 string.

    If it is a str, it is returned.
    If it is Unicode, it is encoded into a utf-8 string.
    """
    if isinstance(unicode_or_utf8_string, str):
        # TODO: jam 20070209 This is overkill, and probably has an impact on
        #       performance if we are dealing with lots of apis that want a
        try:
            # Make sure it is a valid utf-8 string
            unicode_or_utf8_string.decode('utf-8')
        except UnicodeDecodeError:
            raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
        return unicode_or_utf8_string
    return unicode_or_utf8_string.encode('utf-8')
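

# Illustrative round-trip (not from the original source):
#   safe_utf8(u'caf\xe9')       -> 'caf\xc3\xa9'   (unicode in, utf-8 bytes out)
#   safe_unicode('caf\xc3\xa9') -> u'caf\xe9'      (utf-8 bytes in, unicode out)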


_revision_id_warning = ('Unicode revision ids were deprecated in bzr 0.15.'
                        ' Revision id generators should be creating utf8'
                        ' revision ids.')


def safe_revision_id(unicode_or_utf8_string, warn=True):
    """Revision ids should now be utf8, but at one point they were unicode.

    :param unicode_or_utf8_string: A possibly Unicode revision_id. (can also be
    :param warn: Functions that are sanitizing user data can set warn=False
    :return: None or a utf8 revision id.
    """
    if (unicode_or_utf8_string is None
        or unicode_or_utf8_string.__class__ == str):
        return unicode_or_utf8_string
    if warn:
        symbol_versioning.warn(_revision_id_warning, DeprecationWarning,
                               stacklevel=2)
    return cache_utf8.encode(unicode_or_utf8_string)


_file_id_warning = ('Unicode file ids were deprecated in bzr 0.15. File id'
                    ' generators should be creating utf8 file ids.')


def safe_file_id(unicode_or_utf8_string, warn=True):
    """File ids should now be utf8, but at one point they were unicode.

    This is the same as safe_utf8, except it uses the cached encode functions
    to save a little bit of performance.

    :param unicode_or_utf8_string: A possibly Unicode file_id. (can also be
    :param warn: Functions that are sanitizing user data can set warn=False
    :return: None or a utf8 file id.
    """
    if (unicode_or_utf8_string is None
        or unicode_or_utf8_string.__class__ == str):
        return unicode_or_utf8_string
    if warn:
        symbol_versioning.warn(_file_id_warning, DeprecationWarning,
                               stacklevel=2)
    return cache_utf8.encode(unicode_or_utf8_string)


_platform_normalizes_filenames = False
if sys.platform == 'darwin':
    _platform_normalizes_filenames = True


def normalizes_filenames():
    """Return True if this platform normalizes unicode filenames.

    Mac OSX does, Windows/Linux do not.
    """
    return _platform_normalizes_filenames


def _accessible_normalized_filename(path):
    """Get the unicode normalized path, and if you can access the file.

    On platforms where the system normalizes filenames (Mac OSX),
    you can access a file by any path which will normalize correctly.
    On platforms where the system does not normalize filenames
    (Windows, Linux), you have to access a file by its exact path.

    Internally, bzr only supports NFC normalization, since that is
    the standard for XML documents.

    So return the normalized path, and a flag indicating if the file
    can be accessed by that path.
    """
    return unicodedata.normalize('NFC', unicode(path)), True


def _inaccessible_normalized_filename(path):
    __doc__ = _accessible_normalized_filename.__doc__

    normalized = unicodedata.normalize('NFC', unicode(path))
    return normalized, normalized == path


if _platform_normalizes_filenames:
    normalized_filename = _accessible_normalized_filename
else:
    normalized_filename = _inaccessible_normalized_filename


default_terminal_width = 80
"""The default terminal width for ttys.

This is defined so that higher levels can share a common fallback value when
terminal_width() returns None.
"""


def terminal_width():
    """Return terminal width.

    None is returned if the width can't be established precisely.

    - if BZR_COLUMNS is set, returns its value
    - if there is no controlling terminal, returns None
    - if COLUMNS is set, returns its value,

    From there, we need to query the OS to get the size of the controlling
    terminal:
    - get termios.TIOCGWINSZ
    - if an error occurs or a negative value is obtained, returns None
    - win32utils.get_console_size() decides,
    - returns None on error (provided default value)
    """
    # If BZR_COLUMNS is set, take it, user is always right
    try:
        return int(os.environ['BZR_COLUMNS'])
    except (KeyError, ValueError):
        pass

    isatty = getattr(sys.stdout, 'isatty', None)
    if isatty is None or not isatty():
        # Don't guess, setting BZR_COLUMNS is the recommended way to override.
        return None

    # If COLUMNS is set, take it, the terminal knows better (even inside a
    # given terminal, the application can decide to set COLUMNS to a lower
    # value (split screen) or a bigger value (scroll bars))
    try:
        return int(os.environ['COLUMNS'])
    except (KeyError, ValueError):
        pass

    width, height = _terminal_size(None, None)
    # Consider invalid values as meaning no width


def _win32_terminal_size(width, height):
    width, height = win32utils.get_console_size(defaultx=width, defaulty=height)
    return width, height


def _ioctl_terminal_size(width, height):
    try:
        import struct, fcntl, termios
        s = struct.pack('HHHH', 0, 0, 0, 0)
        x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
        height, width = struct.unpack('HHHH', x)[0:2]
    except (IOError, AttributeError):
        pass
    return width, height


_terminal_size = None
"""Returns the terminal size as (width, height).

:param width: Default value for width.
:param height: Default value for height.

This is defined specifically for each OS and queries the size of the
controlling terminal. If any error occurs, the provided default values should
be returned.
"""
if sys.platform == 'win32':
    _terminal_size = _win32_terminal_size
else:
    _terminal_size = _ioctl_terminal_size


def supports_executable():
    return sys.platform != "win32"


def supports_posix_readonly():
    """Return True if 'readonly' has POSIX semantics, False otherwise.

    Notably, a win32 readonly file cannot be deleted, unlike POSIX where the
    directory controls creation/deletion, etc.

    And under win32, readonly means that the directory itself cannot be
    deleted.  The contents of a readonly directory can be changed, unlike POSIX
    where files in readonly directories cannot be added, deleted or renamed.
    """
    return sys.platform != "win32"


def set_or_unset_env(env_variable, value):
    """Modify the environment, setting or removing the env_variable.

    :param env_variable: The environment variable in question
    :param value: The value to set the environment to. If None, then
        the variable will be removed.
    :return: The original value of the environment variable.
    """
    orig_val = os.environ.get(env_variable)
    if value is None:
        if orig_val is not None:
            del os.environ[env_variable]
    else:
        if isinstance(value, unicode):
            value = value.encode(get_user_encoding())
        os.environ[env_variable] = value
    return orig_val
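

# Illustrative usage (not from the original source): temporarily override a
# variable, then restore whatever was there before.
#
#   old = set_or_unset_env('BZR_COLUMNS', '120')
#   try:
#       pass  # code that reads BZR_COLUMNS
#   finally:
#       set_or_unset_env('BZR_COLUMNS', old)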


_validWin32PathRE = re.compile(r'^([A-Za-z]:[/\\])?[^:<>*"?\|]*$')


def check_legal_path(path):
    """Check whether the supplied path is legal.
    This is only required on Windows, so we don't test on other platforms
    right now.
    """
    if sys.platform != "win32":
        return
    if _validWin32PathRE.match(path) is None:
        raise errors.IllegalPath(path)


_WIN32_ERROR_DIRECTORY = 267 # Similar to errno.ENOTDIR


def _is_error_enotdir(e):
    """Check if this exception represents ENOTDIR.

    Unfortunately, python is very inconsistent about the exception
    here. The cases are:
      1) Linux, Mac OSX all versions seem to set errno == ENOTDIR
      2) Windows, Python2.4, uses errno == ERROR_DIRECTORY (267)
         which is the windows error code.
      3) Windows, Python2.5 uses errno == EINVAL and
         winerror == ERROR_DIRECTORY

    :param e: An Exception object (expected to be OSError with an errno
        attribute, but we should be able to cope with anything)
    :return: True if this represents an ENOTDIR error. False otherwise.
    """
    en = getattr(e, 'errno', None)
    if (en == errno.ENOTDIR
        or (sys.platform == 'win32'
            and (en == _WIN32_ERROR_DIRECTORY
                 or (en == errno.EINVAL
                     and getattr(e, 'winerror', None) == _WIN32_ERROR_DIRECTORY)
        ))):
        return True
    return False


def walkdirs(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields all the data about the contents of a directory at a time.
    After each directory has been yielded, if the caller has mutated the list
    to exclude some directories, they are then not descended into.

    The data yielded is of the form:
    ((directory-relpath, directory-path-from-top),
    [(relpath, basename, kind, lstat, path-from-top), ...]),
     - directory-relpath is the relative path of the directory being returned
       with respect to top. prefix is prepended to this.
     - directory-path-from-root is the path including top for this directory.
       It is suitable for use with os functions.
     - relpath is the relative path within the subtree being walked.
     - basename is the basename of the path
     - kind is the kind of the file now. If unknown then the file is not
       present within the tree - but it may be recorded as versioned. See
       versioned_kind.
     - lstat is the stat data *if* the file was statted.
     - planned, not implemented:
       path_from_tree_root is the path from the root of the tree.

    :param prefix: Prefix the relpaths that are yielded with 'prefix'. This
        allows one to walk a subtree but get paths that are relative to a tree
        rooted higher up.
    :return: an iterator over the dirs.
    """
    # TODO: there is a bit of a smell where the results of the directory-
    # summary in this, and the path from the root, may not agree
    # depending on top and prefix - i.e. ./foo and foo as a pair leads to
    # potentially confusing output. We should make this more robust - but
    # not at a speed cost. RBC 20060731
    _directory = _directory_kind
    _listdir = os.listdir
    _kind_from_mode = file_kind_from_stat_mode
    pending = [(safe_unicode(prefix), "", _directory, None, safe_unicode(top))]
    while pending:
        # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
        relroot, _, _, _, top = pending.pop()
        relprefix = relroot + u'/'
        top_slash = top + u'/'

        dirblock = []
        append = dirblock.append
        try:
            names = sorted(_listdir(top))
        except OSError, e:
            if not _is_error_enotdir(e):
                raise
        for name in names:
            abspath = top_slash + name
            statvalue = _lstat(abspath)
            kind = _kind_from_mode(statvalue.st_mode)
            append((relprefix + name, name, kind, statvalue, abspath))
        yield (relroot, top), dirblock

        # push the user specified dirs from dirblock
        pending.extend(d for d in reversed(dirblock) if d[2] == _directory)
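
# Illustrative usage (hypothetical directory name, not from the original
# source): walk a tree, pruning '.bzr' control directories and printing the
# relative path of every regular file.
#
#   for (dir_relpath, dir_path), entries in walkdirs('/tmp/project'):
#       entries[:] = [e for e in entries if e[1] != '.bzr']
#       for relpath, name, kind, statvalue, abspath in entries:
#           if kind == 'file':
#               print relpath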


class DirReader(object):
    """An interface for reading directories."""

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """Converts top and prefix to a starting dir entry

        :param top: A utf8 path
        :param prefix: An optional utf8 path to prefix output relative paths
            with.
        :return: A tuple starting with prefix, and ending with the native
            encoding of top.
        """
        raise NotImplementedError(self.top_prefix_to_starting_dir)

    def read_dir(self, prefix, top):
        """Read a specific dir.

        :param prefix: A utf8 prefix to be prepended to the path basenames.
        :param top: A natively encoded path to read.
        :return: A list of the directory's contents. Each item contains:
            (utf8_relpath, utf8_name, kind, lstatvalue, native_abspath)
        """
        raise NotImplementedError(self.read_dir)


_selected_dir_reader = None


def _walkdirs_utf8(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields the same information as walkdirs() only each entry is yielded
    in utf-8. On platforms which have a filesystem encoding of utf8 the paths
    are returned as exact byte-strings.

    :return: yields a tuple of (dir_info, [file_info])
        dir_info is (utf8_relpath, path-from-top)
        file_info is (utf8_relpath, utf8_name, kind, lstat, path-from-top)
        if top is an absolute path, path-from-top is also an absolute path.
        path-from-top might be unicode or utf8, but it is the correct path to
        pass to os functions to affect the file in question. (such as os.lstat)
    """
    global _selected_dir_reader
    if _selected_dir_reader is None:
        fs_encoding = _fs_enc.upper()
        if sys.platform == "win32" and win32utils.winver == 'Windows NT':
            # Win98 doesn't have unicode apis like FindFirstFileW
            # TODO: We possibly could support Win98 by falling back to the
            #       original FindFirstFile, and using TCHAR instead of WCHAR,
            #       but that gets a bit tricky, and requires custom compiling
            from bzrlib._walkdirs_win32 import Win32ReadDir
            _selected_dir_reader = Win32ReadDir()
        elif fs_encoding in ('UTF-8', 'US-ASCII', 'ANSI_X3.4-1968'):
            # ANSI_X3.4-1968 is a form of ASCII
            try:
                from bzrlib._readdir_pyx import UTF8DirReader
                _selected_dir_reader = UTF8DirReader()
            except ImportError, e:
                failed_to_load_extension(e)

    if _selected_dir_reader is None:
        # Fallback to the python version
        _selected_dir_reader = UnicodeDirReader()

    # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
    # But we don't actually use 1-3 in pending, so set them to None
    pending = [[_selected_dir_reader.top_prefix_to_starting_dir(top, prefix)]]
    read_dir = _selected_dir_reader.read_dir
    _directory = _directory_kind
    while pending:
        relroot, _, _, _, top = pending[-1].pop()
        dirblock = sorted(read_dir(relroot, top))
        yield (relroot, top), dirblock
        # push the user specified dirs from dirblock
        next = [d for d in reversed(dirblock) if d[2] == _directory]
        pending.append(next)


class UnicodeDirReader(DirReader):
    """A dir reader for non-utf8 file systems, which transcodes."""

    __slots__ = ['_utf8_encode']

    def __init__(self):
        self._utf8_encode = codecs.getencoder('utf8')

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """See DirReader.top_prefix_to_starting_dir."""
        return (safe_utf8(prefix), None, None, None, safe_unicode(top))

    def read_dir(self, prefix, top):
        """Read a single directory from a non-utf8 file system.

        top, and the abspath element in the output are unicode, all other paths
        are utf8. Local disk IO is done via unicode calls to listdir etc.

        This is currently the fallback code path when the filesystem encoding is
        not UTF-8. It may be better to implement an alternative so that we can
        safely handle paths that are not properly decodable in the current
        encoding.

        See DirReader.read_dir for details.
        """
        _utf8_encode = self._utf8_encode
        _listdir = os.listdir
        _kind_from_mode = file_kind_from_stat_mode

        relprefix = prefix + '/'
        top_slash = top + u'/'

        dirblock = []
        append = dirblock.append
        for name in sorted(_listdir(top)):
            try:
                name_utf8 = _utf8_encode(name)[0]
            except UnicodeDecodeError:
                raise errors.BadFilenameEncoding(
                    _utf8_encode(relprefix)[0] + name, _fs_enc)
            abspath = top_slash + name
            statvalue = _lstat(abspath)
            kind = _kind_from_mode(statvalue.st_mode)
            append((relprefix + name_utf8, name_utf8, kind, statvalue, abspath))
        return dirblock


def copy_tree(from_path, to_path, handlers={}):
    """Copy all of the entries in from_path into to_path.

    :param from_path: The base directory to copy.
    :param to_path: The target directory. If it does not exist, it will
        be created.
    :param handlers: A dictionary of functions, which take a source and
        destination for files, directories, etc.
        It is keyed on the file kind, such as 'directory', 'symlink', or 'file'
        'file', 'directory', and 'symlink' should always exist.
        If they are missing, they will be replaced with 'shutil.copy2()',
        'os.mkdir()', and 'os.readlink() + os.symlink()', respectively.
    """
    # Now, just copy the existing cached tree to the new location
    # We use a cheap trick here.
    # Absolute paths are prefixed with the first parameter
    # relative paths are prefixed with the second.
    # So we can get both the source and target returned
    # without any extra work.

    def copy_dir(source, dest):
        os.mkdir(dest)

    def copy_link(source, dest):
        """Copy the contents of a symlink"""
        link_to = os.readlink(source)
        os.symlink(link_to, dest)

    real_handlers = {'file': shutil.copy2,
                     'symlink': copy_link,
                     'directory': copy_dir,
                     }
    real_handlers.update(handlers)

    if not os.path.exists(to_path):
        real_handlers['directory'](from_path, to_path)

    for dir_info, entries in walkdirs(from_path, prefix=to_path):
        for relpath, name, kind, st, abspath in entries:
            real_handlers[kind](abspath, relpath)


def path_prefix_key(path):
    """Generate a prefix-order path key for path.

    This can be used to sort paths in the same way that walkdirs does.
    """
    return (dirname(path), path)


def compare_paths_prefix_order(path_a, path_b):
    """Compare path_a and path_b to generate the same order walkdirs uses."""
    key_a = path_prefix_key(path_a)
    key_b = path_prefix_key(path_b)
    return cmp(key_a, key_b)


_cached_user_encoding = None


def get_user_encoding(use_cache=True):
    """Find out what the preferred user encoding is.

    This is generally the encoding that is used for command line parameters
    and file contents. This may be different from the terminal encoding
    or the filesystem encoding.

    :param use_cache: Enable cache for detected encoding.
        (This parameter is turned on by default,
        and required only for selftesting)

    :return: A string defining the preferred user encoding
    """
    global _cached_user_encoding
    if _cached_user_encoding is not None and use_cache:
        return _cached_user_encoding

    if sys.platform == 'darwin':
        # python locale.getpreferredencoding() always returns
        # 'mac-roman' on darwin. That's a lie.
        sys.platform = 'posix'
        if os.environ.get('LANG', None) is None:
            # If LANG is not set, we end up with 'ascii', which is bad
            # ('mac-roman' is more than ascii), so we set a default which
            # will give us UTF-8 (which appears to work in all cases on
            # OSX). Users are still free to override LANG of course, as
            # long as it gives us something meaningful. This work-around
            # *may* not be needed with python 3k and/or OSX 10.5, but will
            # work with them too -- vila 20080908
            os.environ['LANG'] = 'en_US.UTF-8'
        sys.platform = 'darwin'

    try:
        user_encoding = locale.getpreferredencoding()
    except locale.Error, e:
        sys.stderr.write('bzr: warning: %s\n'
                         ' Could not determine what text encoding to use.\n'
                         ' This error usually means your Python interpreter\n'
                         ' doesn\'t support the locale set by $LANG (%s)\n'
                         " Continuing with ascii encoding.\n"
                         % (e, os.environ.get('LANG')))
        user_encoding = 'ascii'

    # Windows returns 'cp0' to indicate there is no code page. So we'll just
    # treat that as ASCII, and not support printing unicode characters to the
    # console.
    # For python scripts run under vim, we get '', so also treat that as ASCII
    if user_encoding in (None, 'cp0', ''):
        user_encoding = 'ascii'
    else:
        try:
            codecs.lookup(user_encoding)
        except LookupError:
            sys.stderr.write('bzr: warning:'
                             ' unknown encoding %s.'
                             ' Continuing with ascii encoding.\n'
                             % user_encoding)
            user_encoding = 'ascii'

    if use_cache:
        _cached_user_encoding = user_encoding

    return user_encoding


def get_host_name():
    """Return the current unicode host name.

    This is meant to be used in place of socket.gethostname() because that
    behaves inconsistently on different platforms.
    """
    if sys.platform == "win32":
        return win32utils.get_host_name()
    else:
        import socket
        return socket.gethostname().decode(get_user_encoding())


def recv_all(socket, bytes):
    """Receive an exact number of bytes.

    Regular Socket.recv() may return less than the requested number of bytes,
    depending on what's in the OS buffer.  MSG_WAITALL is not available
    on all platforms, but this should work everywhere.  This will return
    less than the requested amount if the remote end closes.

    This isn't optimized and is intended mostly for use in testing.
    """
    b = ''
    while len(b) < bytes:
        new = until_no_eintr(socket.recv, bytes - len(b))
        if new == '':
            break # eof
        b += new
    return b


def send_all(socket, bytes, report_activity=None):
    """Send all bytes on a socket.

    Regular socket.sendall() can give socket error 10053 on Windows.  This
    implementation sends no more than 64k at a time, which avoids this problem.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    """
    chunk_size = 2**16
    for pos in xrange(0, len(bytes), chunk_size):
        block = bytes[pos:pos+chunk_size]
        if report_activity is not None:
            report_activity(len(block), 'write')
        until_no_eintr(socket.sendall, block)


def dereference_path(path):
    """Determine the real path to a file.

    All parent elements are dereferenced.  But the file itself is not
    dereferenced.
    :param path: The original path.  May be absolute or relative.
    :return: the real path *to* the file
    """
    parent, base = os.path.split(path)
    # The pathjoin for '.' is a workaround for Python bug #1213894.
    # (initial path components aren't dereferenced)
    return pathjoin(realpath(pathjoin('.', parent)), base)


def supports_mapi():
    """Return True if we can use MAPI to launch a mail client."""
    return sys.platform == "win32"


def resource_string(package, resource_name):
    """Load a resource from a package and return it as a string.

    Note: Only packages that start with bzrlib are currently supported.

    This is designed to be a lightweight implementation of resource
    loading in a way which is API compatible with the same API from
    http://peak.telecommunity.com/DevCenter/PkgResources#basic-resource-access.
    If and when pkg_resources becomes a standard library, this routine
    can be replaced.
    """
    # Check package name is within bzrlib
    if package == "bzrlib":
        resource_relpath = resource_name
    elif package.startswith("bzrlib."):
        package = package[len("bzrlib."):].replace('.', os.sep)
        resource_relpath = pathjoin(package, resource_name)
    else:
        raise errors.BzrError('resource package %s not in bzrlib' % package)

    # Map the resource to a file and read its contents
    base = dirname(bzrlib.__file__)
    if getattr(sys, 'frozen', None):    # bzr.exe
        base = abspath(pathjoin(base, '..', '..'))
    filename = pathjoin(base, resource_relpath)
    return open(filename, 'rU').read()


def file_kind_from_stat_mode_thunk(mode):
    global file_kind_from_stat_mode
    if file_kind_from_stat_mode is file_kind_from_stat_mode_thunk:
        try:
            from bzrlib._readdir_pyx import UTF8DirReader
            file_kind_from_stat_mode = UTF8DirReader().kind_from_mode
        except ImportError, e:
            # This is one time where we won't warn that an extension failed to
            # load. The extension is never available on Windows anyway.
            from bzrlib._readdir_py import (
                _kind_from_mode as file_kind_from_stat_mode
                )
    return file_kind_from_stat_mode(mode)
file_kind_from_stat_mode = file_kind_from_stat_mode_thunk


def file_kind(f, _lstat=os.lstat):
    try:
        return file_kind_from_stat_mode(_lstat(f).st_mode)
    except OSError, e:
        if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
            raise errors.NoSuchFile(f)
        raise


def until_no_eintr(f, *a, **kw):
    """Run f(*a, **kw), retrying if an EINTR error occurs."""
    # Borrowed from Twisted's twisted.python.util.untilConcludes function.
    while True:
        try:
            return f(*a, **kw)
        except (IOError, OSError), e:
            if e.errno == errno.EINTR:
                continue
            raise


def re_compile_checked(re_string, flags=0, where=""):
    """Return a compiled re, or raise a sensible error.

    This should only be used when compiling user-supplied REs.

    :param re_string: Text form of regular expression.
    :param flags: eg re.IGNORECASE
    :param where: Message explaining to the user the context where
        it occurred, eg 'log search filter'.
    """
    # from https://bugs.launchpad.net/bzr/+bug/251352
    try:
        re_obj = re.compile(re_string, flags)
        return re_obj
    except re.error, e:
        if where:
            where = ' in ' + where
        # despite the name 'error' is a type
        raise errors.BzrCommandError('Invalid regular expression%s: %r: %s'
            % (where, re_string, e))
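
# Illustrative usage (hypothetical pattern variable, not from the original
# source):
#   regexp = re_compile_checked(user_pattern, re.IGNORECASE,
#                               where='log search filter')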


if sys.platform == "win32":
    def getchar():
        return msvcrt.getch()
else:
    def getchar():
        fd = sys.stdin.fileno()
        settings = termios.tcgetattr(fd)
        try:
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, settings)
        return ch


if sys.platform == 'linux2':
    def _local_concurrency():
        concurrency = None
        prefix = 'processor'
        for line in file('/proc/cpuinfo', 'rb'):
            if line.startswith(prefix):
                concurrency = int(line[line.find(':')+1:]) + 1
        return concurrency
elif sys.platform == 'darwin':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.availcpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform[0:7] == 'freebsd':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == 'sunos5':
    def _local_concurrency():
        return subprocess.Popen(['psrinfo', '-p'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == "win32":
    def _local_concurrency():
        # This appears to return the number of cores.
        return os.environ.get('NUMBER_OF_PROCESSORS')
else:
    def _local_concurrency():
        return None


_cached_local_concurrency = None


def local_concurrency(use_cache=True):
    """Return how many processes can be run concurrently.

    Rely on platform specific implementations and default to 1 (one) if
    anything goes wrong.
    """
    global _cached_local_concurrency

    if _cached_local_concurrency is not None and use_cache:
        return _cached_local_concurrency

    concurrency = os.environ.get('BZR_CONCURRENCY', None)
    if concurrency is None:
        try:
            concurrency = _local_concurrency()
        except (OSError, IOError):
            pass
    try:
        concurrency = int(concurrency)
    except (TypeError, ValueError):
        concurrency = 1
    if use_cache:
        _cached_local_concurrency = concurrency
    return concurrency
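

# Illustrative usage (not from the original source): BZR_CONCURRENCY, when set,
# takes precedence over platform detection.
#
#   os.environ['BZR_CONCURRENCY'] = '4'
#   local_concurrency(use_cache=False)  # -> 4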