    for dirname in dir_list:
        if is_inside(dirname, fname) or is_inside(fname, dirname):
def pumpfile(from_file, to_file, read_length=-1, buff_size=32768,
             report_activity=None, direction='read'):
    """Copy contents of one file to another.

    The read_length can either be -1 to read to end-of-file (EOF) or
    it can specify the maximum number of bytes to read.

    The buff_size represents the maximum size for each read operation
    performed on from_file.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    :param direction: Will be passed to report_activity

    :return: The number of bytes copied.
    """
    length = 0
    if read_length >= 0:
        # read specified number of bytes
        while read_length > 0:
            num_bytes_to_read = min(read_length, buff_size)
            block = from_file.read(num_bytes_to_read)
            if not block:
                # EOF reached
                break
            if report_activity is not None:
                report_activity(len(block), direction)
            to_file.write(block)

            actual_bytes_read = len(block)
            read_length -= actual_bytes_read
            length += actual_bytes_read
    else:
        # read to EOF
        while True:
            block = from_file.read(buff_size)
            if not block:
                # EOF reached
                break
            if report_activity is not None:
                report_activity(len(block), direction)
            to_file.write(block)
            length += len(block)
    return length
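
# Illustrative usage sketch (not part of the original module; the file names
# are hypothetical): copy one local file to another in 64k blocks with
# pumpfile() defined above.
#
#     from_file = open('source.bin', 'rb')
#     to_file = open('copy.bin', 'wb')
#     try:
#         copied = pumpfile(from_file, to_file, buff_size=65536)
#     finally:
#         from_file.close()
#         to_file.close()
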
def pump_string_file(bytes, file_handle, segment_size=None):
    """Write bytes to file_handle in many smaller writes.

    :param bytes: The string to write.
    :param file_handle: The file to write to.
    """
    # Write data in chunks rather than all at once, because very large
    # writes fail on some platforms (e.g. Windows with SMB mounted
    # drives).
    if not segment_size:
        segment_size = 5242880 # 5MB
    segments = range(len(bytes) / segment_size + 1)
    write = file_handle.write
    for segment_index in segments:
        segment = buffer(bytes, segment_index * segment_size, segment_size)
        write(segment)
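
# Worked example (illustrative, not from the original module): with the
# default 5MB segment_size, a 12MB string gives range(12582912 / 5242880 + 1)
# == range(3), i.e. three buffer() slices of 5MB, 5MB and 2MB, so no single
# write() call exceeds segment_size.
#
#     import StringIO
#     out = StringIO.StringIO()
#     pump_string_file('x' * (12 * 1024 * 1024), out)
#     assert out.getvalue() == 'x' * (12 * 1024 * 1024)
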
def file_iterator(input_file, readsize=32768):
    while True:
        b = input_file.read(readsize)
        if len(b) == 0:
            break
        yield b


def local_time_offset(t=None):
    """Return offset of local zone from GMT, either at present or at time t."""
    # python2.3 localtime() can't take None
    if t is None:
        t = time.time()

    offset = datetime.fromtimestamp(t) - datetime.utcfromtimestamp(t)
    return offset.days * 86400 + offset.seconds


weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
def format_date(t, offset=0, timezone='original', date_fmt=None,
                show_offset=True):
    """Return a formatted date string.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_fmt = date_fmt.replace('%a', weekdays[tt[6]])
    date_str = time.strftime(date_fmt, tt)
    return date_str + offset_str


def format_local_date(t, offset=0, timezone='original', date_fmt=None,
                      show_offset=True):
    """Return a unicode date string formatted according to the current locale.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_str = time.strftime(date_fmt, tt)
    if not isinstance(date_str, unicode):
        date_str = date_str.decode(get_user_encoding(), 'replace')
    return date_str + offset_str


def _format_date(t, offset, timezone, date_fmt, show_offset):
    if timezone == 'utc':
        tt = time.gmtime(t)
        offset = 0
    elif timezone == 'original':
        if offset is None:
            offset = 0
        tt = time.gmtime(t + offset)
    elif timezone == 'local':
        tt = time.localtime(t)
        offset = local_time_offset(t)
    else:
        raise errors.UnsupportedTimezoneFormat(timezone)
    if date_fmt is None:
        date_fmt = "%a %Y-%m-%d %H:%M:%S"
    if show_offset:
        offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
    else:
        offset_str = ''
    return (date_fmt, tt, offset_str)
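
# Illustrative example (not from the original module): format an epoch time
# with an explicit +2h offset; the trailing ' +0200' is the offset_str
# computed in _format_date() above.
#
#     format_date(1234567890.0, offset=7200, timezone='original')
#     # -> 'Sat 2009-02-14 01:31:30 +0200'
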
def compact_date(when):
    return time.strftime('%Y%m%d%H%M%S', time.gmtime(when))


def format_delta(delta):
    """Get a nice looking string for a time delta.

    :param delta: The time difference in seconds, can be positive or negative.
        positive indicates time in the past, negative indicates time in the
        future. (usually time.time() - stored_time)
    :return: String formatted to show approximate resolution
    """
    delta = int(delta)
    if delta >= 0:
        direction = 'ago'
    else:
        direction = 'in the future'
        delta = -delta

    seconds = delta
    if seconds < 90: # print seconds up to 90 seconds
        if seconds == 1:
            return '%d second %s' % (seconds, direction,)
        else:
            return '%d seconds %s' % (seconds, direction)

    minutes = int(seconds / 60)
    seconds -= 60 * minutes
    if seconds == 1:
        plural_seconds = ''
    else:
        plural_seconds = 's'
    if minutes < 90: # print minutes, seconds up to 90 minutes
        if minutes == 1:
            return '%d minute, %d second%s %s' % (
                minutes, seconds, plural_seconds, direction)
        else:
            return '%d minutes, %d second%s %s' % (
                minutes, seconds, plural_seconds, direction)

    hours = int(minutes / 60)
    minutes -= 60 * hours
    if minutes == 1:
        plural_minutes = ''
    else:
        plural_minutes = 's'
    if hours == 1:
        return '%d hour, %d minute%s %s' % (hours, minutes,
                                            plural_minutes, direction)
    return '%d hours, %d minute%s %s' % (hours, minutes,
                                         plural_minutes, direction)
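
# Illustrative examples (not from the original module) of the strings this
# produces; note the singular/plural handling and the direction suffix:
#
#     format_delta(0)      # '0 seconds ago'
#     format_delta(95)     # '1 minute, 35 seconds ago'
#     format_delta(-120)   # '2 minutes, 0 seconds in the future'
#     format_delta(7200)   # '2 hours, 0 minutes ago'
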
    """Return size of given open file."""


        if f == '..':
            raise errors.BzrError("sorry, %r not allowed in path" % f)
        elif (f == '.') or (f == ''):
            pass


def joinpath(p):
    assert isinstance(p, list)
    for f in p:
        if (f == '..') or (f is None) or (f == ''):
            raise errors.BzrError("sorry, %r not allowed in path" % f)
    return pathjoin(*p)


@deprecated_function(zero_nine)
def appendpath(p1, p2):
    return pathjoin(p1, p2)
def parent_directories(filename):
    """Return the list of parent directories, deepest first.

    For example, parent_directories("a/b/c") -> ["a/b", "a"].
    """
    parents = []
    parts = splitpath(dirname(filename))
    while parts:
        parents.append(joinpath(parts))
        parts.pop()
    return parents
_extension_load_failures = []


def failed_to_load_extension(exception):
    """Handle failing to load a binary extension.

    This should be called from the ImportError block guarding the attempt to
    import the native extension. If this function returns, the pure-Python
    implementation should be loaded instead::

    >>> try:
    >>>     import bzrlib._fictional_extension_pyx
    >>> except ImportError, e:
    >>>     bzrlib.osutils.failed_to_load_extension(e)
    >>>     import bzrlib._fictional_extension_py
    """
    # NB: This docstring is just an example, not a doctest, because doctest
    # currently can't cope with the use of lazy imports in this namespace.

    # This currently doesn't report the failure at the time it occurs, because
    # they tend to happen very early in startup when we can't check config
    # files etc, and also we want to report all failures but not spam the user
    # with repeated warnings.
    from bzrlib import trace
    exception_str = str(exception)
    if exception_str not in _extension_load_failures:
        trace.mutter("failed to load compiled extension: %s" % exception_str)
        _extension_load_failures.append(exception_str)


def report_extension_load_failures():
    if not _extension_load_failures:
        return
    from bzrlib.config import GlobalConfig
    if GlobalConfig().get_user_option_as_bool('ignore_missing_extensions'):
        return
    # the warnings framework should by default show this only once
    from bzrlib.trace import warning
    warning(
        "bzr: warning: some compiled extensions could not be loaded; "
        "see <https://answers.launchpad.net/bzr/+faq/703>")
    # we no longer show the specific missing extensions here, because it makes
    # the message too long and scary - see
    # https://bugs.launchpad.net/bzr/+bug/430529


try:
    from bzrlib._chunks_to_lines_pyx import chunks_to_lines
except ImportError, e:
    failed_to_load_extension(e)
    from bzrlib._chunks_to_lines_py import chunks_to_lines
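
# Illustrative sketch (not from the original module; the extension name is
# hypothetical) of the same import-fallback pattern for another optional
# compiled helper:
#
#     try:
#         from bzrlib._some_speedup_pyx import do_work
#     except ImportError, e:
#         failed_to_load_extension(e)
#         from bzrlib._some_speedup_py import do_work
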
def split_lines(s):
    """Split s into lines, but without removing the newline characters."""
    # Trivially convert a fulltext into a 'chunked' representation, and let
    # chunks_to_lines do the heavy lifting.
    if isinstance(s, str):
        # chunks_to_lines only supports 8-bit strings
        return chunks_to_lines([s])
    else:
        return _split_lines(s)


def _split_lines(s):
    """Split s into lines, but without removing the newline characters.

    This supports Unicode or plain string objects.
    """
    lines = s.split('\n')
    result = [line + '\n' for line in lines[:-1]]
    if lines[-1]:
        result.append(lines[-1])
    return result
def link_or_copy(src, dest):
    """Hardlink a file, or copy it if it can't be hardlinked."""
    if not hardlinks_good():
        shutil.copyfile(src, dest)
        return
    try:
        os.link(src, dest)
    except (OSError, IOError), e:
        if e.errno != errno.EXDEV:
            raise
        shutil.copyfile(src, dest)


def delete_any(path):
    """Delete a file, symlink or directory.

    Will delete even if readonly.
    """
    try:
        _delete_file_or_dir(path)
    except (OSError, IOError), e:
        if e.errno in (errno.EPERM, errno.EACCES):
            # make writable and try again
            try:
                make_writable(path)
            except (OSError, IOError):
                pass
            _delete_file_or_dir(path)
        else:
            raise
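
# Illustrative usage (not from the original module; the path is hypothetical):
# delete_any() removes a file, symlink or directory even if it has been made
# read-only, retrying after make_writable().
#
#     delete_any('build/tmp-output')
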
def _delete_file_or_dir(path):
    # Look Before You Leap (LBYL) is appropriate here instead of Easier to Ask for
    # Forgiveness than Permission (EAFP) because:
    # - root can damage a solaris file system by using unlink,
    # - unlink raises different exceptions on different OSes (linux: EISDIR, win32:
    #   EACCES, OSX: EPERM) when invoked on a directory.
    if isdir(path): # Takes care of symlinks
        os.rmdir(path)
    else:
        os.unlink(path)


def has_symlinks():
    if getattr(os, 'symlink', None) is not None:
        return True
    else:
        return False
    avoids that problem.
    """

    if len(base) < MIN_ABS_PATHLENGTH:
        # must have space for e.g. a drive letter
        raise ValueError('%r is too short to calculate a relative path'
            % (base,))

    rp = abspath(path)

    s = []
    head = rp
    while True:
        if len(head) <= len(base) and head != base:
            raise errors.PathNotChild(rp, base)
        if head == base:
            break
        head, tail = split(head)
        if tail:
            s.append(tail)

    return pathjoin(*reversed(s))
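
# Illustrative behaviour sketch (not from the original module; paths are
# hypothetical): relpath() walks up from `path` towards `base`, collecting the
# stripped components, and raises PathNotChild if it escapes base.
#
#     relpath('/home/user/branch', '/home/user/branch/dir/file')
#     # -> 'dir/file'
#     relpath('/home/user/branch', '/home/user/other')
#     # raises errors.PathNotChild
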
def _cicp_canonical_relpath(base, path):
    """Return the canonical path relative to base.

    Like relpath, but on case-insensitive-case-preserving file-systems, this
    will return the relpath as stored on the file-system rather than in the
    case specified in the input string, for all existing portions of the path.

    This will cause O(N) behaviour if called for every path in a tree; if you
    have a number of paths to convert, you should use canonical_relpaths().
    """
    # TODO: it should be possible to optimize this for Windows by using the
    # win32 API FindFiles function to look for the specified name - but using
    # os.listdir() still gives us the correct, platform agnostic semantics.
    rel = relpath(base, path)
    # '.' will have been turned into ''
    if not rel:
        return rel

    abs_base = abspath(base)
    current = abs_base
    _listdir = os.listdir

    # use an explicit iterator so we can easily consume the rest on early exit.
    bit_iter = iter(rel.split('/'))
    for bit in bit_iter:
        lbit = bit.lower()
        try:
            next_entries = _listdir(current)
        except OSError: # enoent, eperm, etc
            # We can't find this in the filesystem, so just append the
            # remaining bits.
            current = pathjoin(current, bit, *list(bit_iter))
            break
        for look in next_entries:
            if lbit == look.lower():
                current = pathjoin(current, look)
                break
        else:
            # got to the end, nothing matched, so we just return the
            # non-existing bits as they were specified (the filename may be
            # the target of a move, for example).
            current = pathjoin(current, bit, *list(bit_iter))
            break
    return current[len(abs_base):].lstrip('/')

# XXX - TODO - we need better detection/integration of case-insensitive
# file-systems; Linux often sees FAT32 devices (or NFS-mounted OSX
# filesystems), for example, so could probably benefit from the same basic
# support there. For now though, only Windows and OSX get that support, and
# they get it for *all* file-systems!
if sys.platform in ('win32', 'darwin'):
    canonical_relpath = _cicp_canonical_relpath
else:
    canonical_relpath = relpath
def canonical_relpaths(base, paths):
    """Create an iterable to canonicalize a sequence of relative paths.

    The intent is for this implementation to use a cache, vastly speeding
    up multiple transformations in the same directory.
    """
    # but for now, we haven't optimized...
    return [canonical_relpath(base, p) for p in paths]
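
# Illustrative sketch (not from the original module; the on-disk names are
# hypothetical): on a case-insensitive, case-preserving file-system a path
# typed as 'readme.txt' under a tree that actually stores 'README.txt' comes
# back in the stored spelling.
#
#     canonical_relpath('/work/tree', '/work/tree/readme.txt')
#     # -> 'README.txt' on win32/darwin, 'readme.txt' elsewhere
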
def safe_unicode(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string into unicode.

    If it is unicode, it is returned.
    Otherwise it is decoded from utf-8. If decoding fails, the exception is
    wrapped in a BzrBadParameterNotUnicode exception.
    """
    if isinstance(unicode_or_utf8_string, unicode):
        return unicode_or_utf8_string
    try:
        return unicode_or_utf8_string.decode('utf8')
    except UnicodeDecodeError:
        raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)


def safe_utf8(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string to a utf8 string.

    If it is a str, it is returned.
    If it is Unicode, it is encoded into a utf-8 string.
    """
    if isinstance(unicode_or_utf8_string, str):
        # TODO: jam 20070209 This is overkill, and probably has an impact on
        #       performance if we are dealing with lots of apis that want a
        #       utf-8 revision id
        try:
            # Make sure it is a valid utf-8 string
            unicode_or_utf8_string.decode('utf-8')
        except UnicodeDecodeError:
            raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
        return unicode_or_utf8_string
    return unicode_or_utf8_string.encode('utf-8')
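
# Illustrative round-trip (not from the original module):
#
#     safe_unicode('caf\xc3\xa9')   # -> u'caf\xe9'
#     safe_utf8(u'caf\xe9')         # -> 'caf\xc3\xa9'
#     safe_unicode('\xff')          # raises BzrBadParameterNotUnicode
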
_revision_id_warning = ('Unicode revision ids were deprecated in bzr 0.15.'
                        ' Revision id generators should be creating utf8'
                        ' revision ids.')


def safe_revision_id(unicode_or_utf8_string, warn=True):
    """Revision ids should now be utf8, but at one point they were unicode.

    :param unicode_or_utf8_string: A possibly Unicode revision_id. (can also be
        utf8 or None).
    :param warn: Functions that are sanitizing user data can set warn=False
    :return: None or a utf8 revision id.
    """
    if (unicode_or_utf8_string is None
        or unicode_or_utf8_string.__class__ == str):
        return unicode_or_utf8_string
    if warn:
        symbol_versioning.warn(_revision_id_warning, DeprecationWarning,
                               stacklevel=2)
    return cache_utf8.encode(unicode_or_utf8_string)


_file_id_warning = ('Unicode file ids were deprecated in bzr 0.15. File id'
                    ' generators should be creating utf8 file ids.')


def safe_file_id(unicode_or_utf8_string, warn=True):
    """File ids should now be utf8, but at one point they were unicode.

    This is the same as safe_utf8, except it uses the cached encode functions
    to save a little bit of performance.

    :param unicode_or_utf8_string: A possibly Unicode file_id. (can also be
        utf8 or None).
    :param warn: Functions that are sanitizing user data can set warn=False
    :return: None or a utf8 file id.
    """
    if (unicode_or_utf8_string is None
        or unicode_or_utf8_string.__class__ == str):
        return unicode_or_utf8_string
    if warn:
        symbol_versioning.warn(_file_id_warning, DeprecationWarning,
                               stacklevel=2)
    return cache_utf8.encode(unicode_or_utf8_string)
_platform_normalizes_filenames = False


def check_legal_path(path):
    """Check whether the supplied path is legal.
    This is only required on Windows, so we don't test on other platforms
    right now.
    """
    if sys.platform != "win32":
        return
    if _validWin32PathRE.match(path) is None:
        raise errors.IllegalPath(path)


_WIN32_ERROR_DIRECTORY = 267 # Similar to errno.ENOTDIR
def _is_error_enotdir(e):
    """Check if this exception represents ENOTDIR.

    Unfortunately, python is very inconsistent about the exception
    here. The cases are:
      1) Linux, Mac OSX all versions seem to set errno == ENOTDIR
      2) Windows, Python2.4, uses errno == ERROR_DIRECTORY (267)
         which is the windows error code.
      3) Windows, Python2.5 uses errno == EINVAL and
         winerror == ERROR_DIRECTORY

    :param e: An Exception object (expected to be OSError with an errno
        attribute, but we should be able to cope with anything)
    :return: True if this represents an ENOTDIR error. False otherwise.
    """
    en = getattr(e, 'errno', None)
    if (en == errno.ENOTDIR
        or (sys.platform == 'win32'
            and (en == _WIN32_ERROR_DIRECTORY
                 or (en == errno.EINVAL
                     and getattr(e, 'winerror', None) == _WIN32_ERROR_DIRECTORY)
        ))):
        return True
    return False
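
# Illustrative usage sketch (not from the original module; `path` is
# hypothetical): callers use this to normalise the platform-specific
# errno/winerror combinations back to a single "not a directory" case.
#
#     try:
#         names = os.listdir(path)
#     except OSError, e:
#         if _is_error_enotdir(e):
#             names = []          # treat a non-directory as empty
#         else:
#             raise
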
def walkdirs(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields all the data about the contents of a directory at a time.
    After each directory has been yielded, if the caller has mutated the list
    to exclude some directories, they are then not descended into.

    The data yielded is of the form:
    ((directory-relpath, directory-path-from-top),
    [(relpath, basename, kind, lstat, path-from-top), ...]),
     - directory-relpath is the relative path of the directory being returned
       with respect to top. prefix is prepended to this.
     - directory-path-from-root is the path including top for this directory.
       It is suitable for use with os functions.
     - relpath is the relative path within the subtree being walked.
     - basename is the basename of the path
     - kind is the kind of the file now. If unknown then the file is not
       present within the tree - but it may be recorded as versioned.
     - lstat is the stat data *if* the file was statted.
     - planned, not implemented:
       path_from_tree_root is the path from the root of the tree.

    :param prefix: Prefix the relpaths that are yielded with 'prefix'. This
        allows one to walk a subtree but get paths that are relative to a tree
        rooted higher up.
    :return: an iterator over the dirs.
    """
    #TODO there is a bit of a smell where the results of the directory-
    # summary in this, and the path from the root, may not agree
    # depending on top and prefix - i.e. ./foo and foo as a pair leads to
    # potentially confusing output. We should make this more robust - but
    # not at a speed cost. RBC 20060731
    _lstat = os.lstat
    _directory = _directory_kind
    _listdir = os.listdir
    _kind_from_mode = file_kind_from_stat_mode
    pending = [(safe_unicode(prefix), "", _directory, None, safe_unicode(top))]
    while pending:
        # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
        relroot, _, _, _, top = pending.pop()
        if relroot:
            relprefix = relroot + u'/'
        else:
            relprefix = ''
        top_slash = top + u'/'

        dirblock = []
        append = dirblock.append
        try:
            names = sorted(_listdir(top))
        except OSError, e:
            if not _is_error_enotdir(e):
                raise
        else:
            for name in names:
                abspath = top_slash + name
                statvalue = _lstat(abspath)
                kind = _kind_from_mode(statvalue.st_mode)
                append((relprefix + name, name, kind, statvalue, abspath))
        yield (relroot, top), dirblock

        # push the user specified dirs from dirblock
        pending.extend(d for d in reversed(dirblock) if d[2] == _directory)
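
# Illustrative usage sketch (not from the original module; pruning '.bzr' is
# just an example): because each dirblock is yielded before it is descended
# into, the caller can mutate it in place to prune the walk.
#
#     for (dir_relpath, dir_path), entries in walkdirs('.'):
#         # drop any subdirectory named '.bzr' so it is not descended into
#         entries[:] = [e for e in entries
#                       if not (e[2] == 'directory' and e[1] == '.bzr')]
#         for relpath, name, kind, statvalue, abspath in entries:
#             pass  # process each entry here
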
class DirReader(object):
    """An interface for reading directories."""

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """Converts top and prefix to a starting dir entry

        :param top: A utf8 path
        :param prefix: An optional utf8 path to prefix output relative paths
            with.
        :return: A tuple starting with prefix, and ending with the native
            encoding of top.
        """
        raise NotImplementedError(self.top_prefix_to_starting_dir)

    def read_dir(self, prefix, top):
        """Read a specific dir.

        :param prefix: A utf8 prefix to be prepended to the path basenames.
        :param top: A natively encoded path to read.
        :return: A list of the directories contents. Each item contains:
            (utf8_relpath, utf8_name, kind, lstatvalue, native_abspath)
        """
        raise NotImplementedError(self.read_dir)
_selected_dir_reader = None


def _walkdirs_utf8(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields the same information as walkdirs() only each entry is yielded
    in utf-8. On platforms which have a filesystem encoding of utf8 the paths
    are returned as exact byte-strings.

    :return: yields a tuple of (dir_info, [file_info])
        dir_info is (utf8_relpath, path-from-top)
        file_info is (utf8_relpath, utf8_name, kind, lstat, path-from-top)
        if top is an absolute path, path-from-top is also an absolute path.
        path-from-top might be unicode or utf8, but it is the correct path to
        pass to os functions to affect the file in question. (such as os.lstat)
    """
    global _selected_dir_reader
    if _selected_dir_reader is None:
        fs_encoding = _fs_enc.upper()
        if sys.platform == "win32" and win32utils.winver == 'Windows NT':
            # Win98 doesn't have unicode apis like FindFirstFileW
            # TODO: We possibly could support Win98 by falling back to the
            #       original FindFirstFile, and using TCHAR instead of WCHAR,
            #       but that gets a bit tricky, and requires custom compiling
            #       for win98 anyway.
            try:
                from bzrlib._walkdirs_win32 import Win32ReadDir
                _selected_dir_reader = Win32ReadDir()
            except ImportError:
                pass
        elif fs_encoding in ('UTF-8', 'US-ASCII', 'ANSI_X3.4-1968'):
            # ANSI_X3.4-1968 is a form of ASCII
            try:
                from bzrlib._readdir_pyx import UTF8DirReader
                _selected_dir_reader = UTF8DirReader()
            except ImportError, e:
                failed_to_load_extension(e)

    if _selected_dir_reader is None:
        # Fallback to the python version
        _selected_dir_reader = UnicodeDirReader()

    # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
    # But we don't actually use 1-3 in pending, so set them to None
    pending = [[_selected_dir_reader.top_prefix_to_starting_dir(top, prefix)]]
    read_dir = _selected_dir_reader.read_dir
    _directory = _directory_kind
    while pending:
        relroot, _, _, _, top = pending[-1].pop()
        if not pending[-1]:
            pending.pop()
        dirblock = sorted(read_dir(relroot, top))
        yield (relroot, top), dirblock
        # push the user specified dirs from dirblock
        next = [d for d in reversed(dirblock) if d[2] == _directory]
        if next:
            pending.append(next)
class UnicodeDirReader(DirReader):
    """A dir reader for non-utf8 file systems, which transcodes."""

    __slots__ = ['_utf8_encode']

    def __init__(self):
        self._utf8_encode = codecs.getencoder('utf8')

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """See DirReader.top_prefix_to_starting_dir."""
        return (safe_utf8(prefix), None, None, None, safe_unicode(top))

    def read_dir(self, prefix, top):
        """Read a single directory from a non-utf8 file system.

        top, and the abspath element in the output are unicode, all other paths
        are utf8. Local disk IO is done via unicode calls to listdir etc.

        This is currently the fallback code path when the filesystem encoding is
        not UTF-8. It may be better to implement an alternative so that we can
        safely handle paths that are not properly decodable in the current
        encoding.

        See DirReader.read_dir for details.
        """
        _utf8_encode = self._utf8_encode
        _lstat = os.lstat
        _listdir = os.listdir
        _kind_from_mode = file_kind_from_stat_mode

        if prefix:
            relprefix = prefix + '/'
        else:
            relprefix = ''
        top_slash = top + u'/'

        dirblock = []
        append = dirblock.append
        for name in sorted(_listdir(top)):
            try:
                name_utf8 = _utf8_encode(name)[0]
            except UnicodeDecodeError:
                raise errors.BadFilenameEncoding(
                    _utf8_encode(relprefix)[0] + name, _fs_enc)
            abspath = top_slash + name
            statvalue = _lstat(abspath)
            kind = _kind_from_mode(statvalue.st_mode)
            append((relprefix + name_utf8, name_utf8, kind, statvalue, abspath))
        return dirblock
def copy_tree(from_path, to_path, handlers={}):
    """Copy all of the entries in from_path into to_path.

    :param from_path: The base directory to copy.
    :param to_path: The target directory. If it does not exist, it will
        be created.
    :param handlers: A dictionary of functions, which takes a source and
_cached_user_encoding = None


def get_user_encoding(use_cache=True):
    """Find out what the preferred user encoding is.

    This is generally the encoding that is used for command line parameters
    and file contents. This may be different from the terminal encoding
    or the filesystem encoding.

    :param use_cache: Enable cache for detected encoding.
        (This parameter is turned on by default,
        and required only for selftesting)

    :return: A string defining the preferred user encoding
    """
    global _cached_user_encoding
    if _cached_user_encoding is not None and use_cache:
        return _cached_user_encoding

    if sys.platform == 'darwin':
        # python locale.getpreferredencoding() always returns
        # 'mac-roman' on darwin. That's a lie.
        sys.platform = 'posix'
        try:
            if os.environ.get('LANG', None) is None:
                # If LANG is not set, we end up with 'ascii', which is bad
                # ('mac-roman' is more than ascii), so we set a default which
                # will give us UTF-8 (which appears to work in all cases on
                # OSX). Users are still free to override LANG of course, as
                # long as it gives us something meaningful. This work-around
                # *may* not be needed with python 3k and/or OSX 10.5, but will
                # work with them too -- vila 20080908
                os.environ['LANG'] = 'en_US.UTF-8'
            import locale
        finally:
            sys.platform = 'darwin'
    else:
        import locale

    try:
        user_encoding = locale.getpreferredencoding()
    except locale.Error, e:
        sys.stderr.write('bzr: warning: %s\n'
                         '  Could not determine what text encoding to use.\n'
                         '  This error usually means your Python interpreter\n'
                         '  doesn\'t support the locale set by $LANG (%s)\n'
                         "  Continuing with ascii encoding.\n"
                         % (e, os.environ.get('LANG')))
        user_encoding = 'ascii'

    # Windows returns 'cp0' to indicate there is no code page. So we'll just
    # treat that as ASCII, and not support printing unicode characters to the
    # console.
    #
    # For python scripts run under vim, we get '', so also treat that as ASCII
    if user_encoding in (None, 'cp0', ''):
        user_encoding = 'ascii'
    else:
        # check that the reported encoding is actually usable; fall back to
        # ascii if it is not
        try:
            codecs.lookup(user_encoding)
        except LookupError:
            sys.stderr.write('bzr: warning:'
                             ' unknown encoding %s.'
                             ' Continuing with ascii encoding.\n'
                             % user_encoding
                            )
            user_encoding = 'ascii'

    if use_cache:
        _cached_user_encoding = user_encoding

    return user_encoding
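
# Illustrative usage sketch (not from the original module): decode a
# command-line argument from the user's preferred encoding before treating
# it as a unicode path.
#
#     import sys
#     arg = sys.argv[1]
#     unicode_arg = arg.decode(get_user_encoding(), 'replace')
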
def get_host_name():
    """Return the current unicode host name.

    This is meant to be used in place of socket.gethostname() because that
    behaves inconsistently on different platforms.
    """
    if sys.platform == "win32":
        return win32utils.get_host_name()
    else:
        return socket.gethostname().decode(get_user_encoding())


def recv_all(socket, bytes):
    """Receive an exact number of bytes.

    Regular Socket.recv() may return less than the requested number of bytes,
    depending on what's in the OS buffer. MSG_WAITALL is not available
    on all platforms, but this should work everywhere. This will return
    less than the requested amount if the remote end closes.

    This isn't optimized and is intended mostly for use in testing.
    """
    b = ''
    while len(b) < bytes:
        new = until_no_eintr(socket.recv, bytes - len(b))
        if new == '':
            break # eof
        b += new
    return b
def send_all(socket, bytes, report_activity=None):
    """Send all bytes on a socket.

    Regular socket.sendall() can give socket error 10053 on Windows. This
    implementation sends no more than 64k at a time, which avoids this problem.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    """
    chunk_size = 2**16
    for pos in xrange(0, len(bytes), chunk_size):
        block = bytes[pos:pos+chunk_size]
        if report_activity is not None:
            report_activity(len(block), 'write')
        until_no_eintr(socket.sendall, block)
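
# Illustrative round-trip sketch (not from the original module; the socket
# pair is hypothetical): send a payload in 64k chunks and read back an exact
# number of bytes on the other side.
#
#     send_all(client_sock, payload)
#     received = recv_all(server_sock, len(payload))
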
def dereference_path(path):
    """Determine the real path to a file.

    All parent elements are dereferenced. But the file itself is not
    dereferenced.
    :param path: The original path. May be absolute or relative.
    :return: the real path *to* the file
    """
    parent, base = os.path.split(path)
    # The pathjoin for '.' is a workaround for Python bug #1213894.
    # (initial path components aren't dereferenced)
    return pathjoin(realpath(pathjoin('.', parent)), base)


def supports_mapi():
    """Return True if we can use MAPI to launch a mail client."""
    return sys.platform == "win32"


def resource_string(package, resource_name):
    """Load a resource from a package and return it as a string.

    Note: Only packages that start with bzrlib are currently supported.

    This is designed to be a lightweight implementation of resource
    loading in a way which is API compatible with the same API from
    pkg_resources. See
    http://peak.telecommunity.com/DevCenter/PkgResources#basic-resource-access.
    If and when pkg_resources becomes a standard library, this routine
    can be deprecated.
    """
    # Check package name is within bzrlib
    if package == "bzrlib":
        resource_relpath = resource_name
    elif package.startswith("bzrlib."):
        package = package[len("bzrlib."):].replace('.', os.sep)
        resource_relpath = pathjoin(package, resource_name)
    else:
        raise errors.BzrError('resource package %s not in bzrlib' % package)

    # Map the resource to a file and read its contents
    base = dirname(bzrlib.__file__)
    if getattr(sys, 'frozen', None): # bzr.exe
        base = abspath(pathjoin(base, '..', '..'))
    filename = pathjoin(base, resource_relpath)
    return open(filename, 'rU').read()
def file_kind_from_stat_mode_thunk(mode):
    global file_kind_from_stat_mode
    if file_kind_from_stat_mode is file_kind_from_stat_mode_thunk:
        try:
            from bzrlib._readdir_pyx import UTF8DirReader
            file_kind_from_stat_mode = UTF8DirReader().kind_from_mode
        except ImportError, e:
            # This is one time where we won't warn that an extension failed to
            # load. The extension is never available on Windows anyway.
            from bzrlib._readdir_py import (
                _kind_from_mode as file_kind_from_stat_mode
                )
    return file_kind_from_stat_mode(mode)
file_kind_from_stat_mode = file_kind_from_stat_mode_thunk


def file_kind(f, _lstat=os.lstat):
    try:
        return file_kind_from_stat_mode(_lstat(f).st_mode)
    except OSError, e:
        if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
            raise errors.NoSuchFile(f)
        raise
def until_no_eintr(f, *a, **kw):
    """Run f(*a, **kw), retrying if an EINTR error occurs."""
    # Borrowed from Twisted's twisted.python.util.untilConcludes function.
    while True:
        try:
            return f(*a, **kw)
        except (IOError, OSError), e:
            if e.errno == errno.EINTR:
                continue
            raise
def re_compile_checked(re_string, flags=0, where=""):
    """Return a compiled re, or raise a sensible error.

    This should only be used when compiling user-supplied REs.

    :param re_string: Text form of regular expression.
    :param flags: eg re.IGNORECASE
    :param where: Message explaining to the user the context where
        it occurred, eg 'log search filter'.
    """
    # from https://bugs.launchpad.net/bzr/+bug/251352
    try:
        re_obj = re.compile(re_string, flags)
        re_obj.search("")
        return re_obj
    except re.error, e:
        if where:
            where = ' in ' + where
        # despite the name 'error' is a type
        raise errors.BzrCommandError('Invalid regular expression%s: %r: %s'
            % (where, re_string, e))
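
# Illustrative usage (not from the original module): a bad user-supplied
# pattern is reported as a command error rather than a raw traceback.
#
#     pattern = re_compile_checked('^bzr-.*\.tar\.gz$', re.IGNORECASE,
#                                  'file name filter')
#     # re_compile_checked('*bad*', where='log search filter')
#     # -> BzrCommandError: Invalid regular expression in log search filter: ...
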
if sys.platform == "win32":
    def getchar():
        return msvcrt.getch()
else:
    def getchar():
        fd = sys.stdin.fileno()
        settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, settings)
        return ch
if sys.platform == 'linux2':
    def _local_concurrency():
        concurrency = None
        prefix = 'processor'
        for line in file('/proc/cpuinfo', 'rb'):
            if line.startswith(prefix):
                concurrency = int(line[line.find(':')+1:]) + 1
        return concurrency
elif sys.platform == 'darwin':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.availcpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform[0:7] == 'freebsd':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == 'sunos5':
    def _local_concurrency():
        return subprocess.Popen(['psrinfo', '-p',],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == "win32":
    def _local_concurrency():
        # This appears to return the number of cores.
        return os.environ.get('NUMBER_OF_PROCESSORS')
else:
    def _local_concurrency():
        # No known way to detect the CPU count on this platform.
        return None


_cached_local_concurrency = None


def local_concurrency(use_cache=True):
    """Return how many processes can be run concurrently.

    Rely on platform specific implementations and default to 1 (one) if
    anything goes wrong.
    """
    global _cached_local_concurrency

    if _cached_local_concurrency is not None and use_cache:
        return _cached_local_concurrency

    try:
        concurrency = _local_concurrency()
    except (OSError, IOError):
        concurrency = None
    try:
        concurrency = int(concurrency)
    except (TypeError, ValueError):
        concurrency = 1
    if use_cache:
        _cached_local_concurrency = concurrency
    return concurrency
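
# Illustrative usage (not from the original module): size a worker pool from
# the detected CPU count, relying on the documented fallback of 1 when
# detection fails.
#
#     workers = max(1, local_concurrency())
#     # e.g. spawn `workers` threads or processes for a parallel task
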