    for dirname in dir_list:
        if is_inside(dirname, fname) or is_inside(fname, dirname):
def pumpfile(from_file, to_file, read_length=-1, buff_size=32768,
             report_activity=None, direction='read'):
    """Copy contents of one file to another.

    The read_length can either be -1 to read to end-of-file (EOF) or
    it can specify the maximum number of bytes to read.

    The buff_size represents the maximum size for each read operation
    performed on from_file.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    :param direction: Will be passed to report_activity

    :return: The number of bytes copied.
    """
        # read specified number of bytes
        while read_length > 0:
            num_bytes_to_read = min(read_length, buff_size)
            block = from_file.read(num_bytes_to_read)
            if report_activity is not None:
                report_activity(len(block), direction)
            actual_bytes_read = len(block)
            read_length -= actual_bytes_read
            length += actual_bytes_read
            block = from_file.read(buff_size)
            if report_activity is not None:
                report_activity(len(block), direction)
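# Illustration (not from the original module): the chunked copy above, driven
# with in-memory file objects; StringIO stands in for real transport files.
def _pumpfile_example():
    from StringIO import StringIO
    src = StringIO('x' * 100000)
    dst = StringIO()
    copied = pumpfile(src, dst, buff_size=32768)
    # copied == 100000 and dst.getvalue() == src.getvalue()
    return copied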
def pump_string_file(bytes, file_handle, segment_size=None):
    """Write bytes to file_handle in many smaller writes.

    :param bytes: The string to write.
    :param file_handle: The file to write to.
    """
    # Write data in chunks rather than all at once, because very large
    # writes fail on some platforms (e.g. Windows with SMB mounted
    # drives).
    if not segment_size:
        segment_size = 5242880 # 5MB
    segments = range(len(bytes) / segment_size + 1)
    write = file_handle.write
    for segment_index in segments:
        segment = buffer(bytes, segment_index * segment_size, segment_size)
        write(segment)
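# Illustration (not from the original module): with the 5MB default, a 12MB
# string is written as three buffer() slices of 5MB, 5MB and 2MB.
def _pump_string_file_example():
    from StringIO import StringIO
    data = 'a' * (12 * 1024 * 1024)
    out = StringIO()
    pump_string_file(data, out)
    # out.getvalue() == data, written in three write() calls
    return out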
def file_iterator(input_file, readsize=32768):
    while True:
        b = input_file.read(readsize)
        if len(b) == 0:
            break
        yield b


def local_time_offset(t=None):
    """Return offset of local zone from GMT, either at present or at time t."""
    if t is None:
        t = time.time()
    offset = datetime.fromtimestamp(t) - datetime.utcfromtimestamp(t)
    return offset.days * 86400 + offset.seconds
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_default_format_by_weekday_num = [wd + " %Y-%m-%d %H:%M:%S" for wd in weekdays]
def format_date(t, offset=0, timezone='original', date_fmt=None,
                show_offset=True):
    """Return a formatted date string.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_fmt = date_fmt.replace('%a', weekdays[tt[6]])
    date_str = time.strftime(date_fmt, tt)
    return date_str + offset_str
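# Illustration (values invented): 'original' mode shifts the time tuple by the
# supplied offset and then appends the formatted offset string.
def _format_date_example():
    s = format_date(0.0, offset=19800, timezone='original')
    # s == 'Thu 1970-01-01 05:30:00 +0530'
    return s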
# Cache of formatted offset strings
_offset_cache = {}


def format_date_with_offset_in_original_timezone(t, offset=0,
                                                 _cache=_offset_cache):
    """Return a formatted date string in the original timezone.

    This routine may be faster than format_date.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    """
    tt = time.gmtime(t + offset)
    date_fmt = _default_format_by_weekday_num[tt[6]]
    date_str = time.strftime(date_fmt, tt)
    offset_str = _cache.get(offset, None)
    if offset_str is None:
        offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
        _cache[offset] = offset_str
    return date_str + offset_str
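# Worked example (not from the source) of the offset-string formula used above
# and again in _format_date below: floor division keeps negative offsets right.
def _offset_str_example():
    fmt = lambda offset: ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
    return fmt(19800), fmt(-3600)   # (' +0530', ' -0100')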
def format_local_date(t, offset=0, timezone='original', date_fmt=None,
                      show_offset=True):
    """Return a unicode date string formatted according to the current locale.

    :param t: Seconds since the epoch.
    :param offset: Timezone offset in seconds east of utc.
    :param timezone: How to display the time: 'utc', 'original' for the
        timezone specified by offset, or 'local' for the process's current
        timezone.
    :param date_fmt: strftime format.
    :param show_offset: Whether to append the timezone.
    """
    (date_fmt, tt, offset_str) = \
        _format_date(t, offset, timezone, date_fmt, show_offset)
    date_str = time.strftime(date_fmt, tt)
    if not isinstance(date_str, unicode):
        date_str = date_str.decode(get_user_encoding(), 'replace')
    return date_str + offset_str
def _format_date(t, offset, timezone, date_fmt, show_offset):
    if timezone == 'utc':
        tt = time.gmtime(t)
        offset = 0
    elif timezone == 'original':
        if offset is None:
            offset = 0
        tt = time.gmtime(t + offset)
    elif timezone == 'local':
        tt = time.localtime(t)
        offset = local_time_offset(t)
    else:
        raise errors.UnsupportedTimezoneFormat(timezone)
    if date_fmt is None:
        date_fmt = "%a %Y-%m-%d %H:%M:%S"
    if show_offset:
        offset_str = ' %+03d%02d' % (offset / 3600, (offset / 60) % 60)
    else:
        offset_str = ''
    return (date_fmt, tt, offset_str)
def compact_date(when):
    return time.strftime('%Y%m%d%H%M%S', time.gmtime(when))
def format_delta(delta):
    """Get a nice looking string for a time delta.

    :param delta: The time difference in seconds, can be positive or negative.
        positive indicates time in the past, negative indicates time in the
        future. (usually time.time() - stored_time)
    :return: String formatted to show approximate resolution
    """
    delta = int(delta)
    if delta >= 0:
        direction = 'ago'
    else:
        direction = 'in the future'
        delta = -delta

    seconds = delta
    if seconds < 90: # print seconds up to 90 seconds
        if seconds == 1:
            return '%d second %s' % (seconds, direction,)
        else:
            return '%d seconds %s' % (seconds, direction)

    minutes = int(seconds / 60)
    seconds -= 60 * minutes
    if seconds == 1:
        plural_seconds = ''
    else:
        plural_seconds = 's'
    if minutes < 90: # print minutes, seconds up to 90 minutes
        if minutes == 1:
            return '%d minute, %d second%s %s' % (
                minutes, seconds, plural_seconds, direction)
        else:
            return '%d minutes, %d second%s %s' % (
                minutes, seconds, plural_seconds, direction)

    hours = int(minutes / 60)
    minutes -= 60 * hours
    if minutes == 1:
        plural_minutes = ''
    else:
        plural_minutes = 's'
    if hours == 1:
        return '%d hour, %d minute%s %s' % (hours, minutes,
                                            plural_minutes, direction)
    return '%d hours, %d minute%s %s' % (hours, minutes,
                                         plural_minutes, direction)
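# Illustration (values invented): the resolution changes at 90 seconds and
# 90 minutes, and negative deltas read as times in the future.
def _format_delta_examples():
    return [format_delta(1),      # '1 second ago'
            format_delta(75),     # '75 seconds ago'
            format_delta(330),    # '5 minutes, 30 seconds ago'
            format_delta(-7500)]  # '2 hours, 5 minutes in the future'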
    """Return size of given open file."""
            raise errors.BzrError("sorry, %r not allowed in path" % f)
        elif (f == '.') or (f == ''):


        if (f == '..') or (f is None) or (f == ''):
            raise errors.BzrError("sorry, %r not allowed in path" % f)
    return pathjoin(*p)
@deprecated_function(zero_nine)
def appendpath(p1, p2):
    return pathjoin(p1, p2)
def parent_directories(filename):
    """Return the list of parent directories, deepest first.

    For example, parent_directories("a/b/c") -> ["a/b", "a"].
    """
    parents = []
    parts = splitpath(dirname(filename))
    while parts:
        parents.append(joinpath(parts))
        parts.pop()
    return parents
_extension_load_failures = []


def failed_to_load_extension(exception):
    """Handle failing to load a binary extension.

    This should be called from the ImportError block guarding the attempt to
    import the native extension.  If this function returns, the pure-Python
    implementation should be loaded instead::

    >>> try:
    >>>     import bzrlib._fictional_extension_pyx
    >>> except ImportError, e:
    >>>     bzrlib.osutils.failed_to_load_extension(e)
    >>>     import bzrlib._fictional_extension_py
    """
    # NB: This docstring is just an example, not a doctest, because doctest
    # currently can't cope with the use of lazy imports in this namespace --
    # This currently doesn't report the failure at the time it occurs, because
    # they tend to happen very early in startup when we can't check config
    # files etc, and also we want to report all failures but not spam the user
    from bzrlib import trace
    exception_str = str(exception)
    if exception_str not in _extension_load_failures:
        trace.mutter("failed to load compiled extension: %s" % exception_str)
        _extension_load_failures.append(exception_str)
def report_extension_load_failures():
    if not _extension_load_failures:
        return
    from bzrlib.config import GlobalConfig
    if GlobalConfig().get_user_option_as_bool('ignore_missing_extensions'):
        return
    # the warnings framework should by default show this only once
    from bzrlib.trace import warning
    warning(
        "bzr: warning: some compiled extensions could not be loaded; "
        "see <https://answers.launchpad.net/bzr/+faq/703>")
    # we no longer show the specific missing extensions here, because it makes
    # the message too long and scary - see
    # https://bugs.launchpad.net/bzr/+bug/430529


try:
    from bzrlib._chunks_to_lines_pyx import chunks_to_lines
except ImportError, e:
    failed_to_load_extension(e)
    from bzrlib._chunks_to_lines_py import chunks_to_lines
def split_lines(s):
    """Split s into lines, but without removing the newline characters."""
    # Trivially convert a fulltext into a 'chunked' representation, and let
    # chunks_to_lines do the heavy lifting.
    if isinstance(s, str):
        # chunks_to_lines only supports 8-bit strings
        return chunks_to_lines([s])
    else:
        return _split_lines(s)


def _split_lines(s):
    """Split s into lines, but without removing the newline characters.

    This supports Unicode or plain string objects.
    """
    lines = s.split('\n')
    result = [line + '\n' for line in lines[:-1]]
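# Illustration (values invented): newlines are preserved on every element;
# unicode input goes through _split_lines, byte strings through chunks_to_lines.
def _split_lines_example():
    return (split_lines('one\ntwo\n'),       # ['one\n', 'two\n']
            split_lines(u'caf\xe9\nbar\n'))  # [u'caf\xe9\n', u'bar\n']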
def link_or_copy(src, dest):
    """Hardlink a file, or copy it if it can't be hardlinked."""
    if not hardlinks_good():
        shutil.copyfile(src, dest)
        return
    try:
        os.link(src, dest)
    except (OSError, IOError), e:
        if e.errno != errno.EXDEV:
            raise
        shutil.copyfile(src, dest)
def delete_any(path):
    """Delete a file, symlink or directory.

    Will delete even if readonly.
    """
    try:
        _delete_file_or_dir(path)
    except (OSError, IOError), e:
        if e.errno in (errno.EPERM, errno.EACCES):
            # make writable and try again
            try:
                make_writable(path)
            except (OSError, IOError):
                pass
            _delete_file_or_dir(path)
        else:
            raise
def _delete_file_or_dir(path):
    # Look Before You Leap (LBYL) is appropriate here instead of Easier to Ask for
    # Forgiveness than Permission (EAFP) because:
    # - root can damage a solaris file system by using unlink,
    # - unlink raises different exceptions on different OSes (linux: EISDIR, win32:
    # EACCES, OSX: EPERM) when invoked on a directory.
    if isdir(path): # Takes care of symlinks
        os.rmdir(path)
    else:
        os.unlink(path)


def has_symlinks():
    avoids that problem.
    """
    if len(base) < MIN_ABS_PATHLENGTH:
        # must have space for e.g. a drive letter
        raise ValueError('%r is too short to calculate a relative path'
            % (base,))

    rp = abspath(path)

    s = []
    head = rp
    while True:
        if len(head) <= len(base) and head != base:
            raise errors.PathNotChild(rp, base)
        if head == base:
            break
        head, tail = split(head)
        if tail:
            s.append(tail)

    return pathjoin(*reversed(s))
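# Illustration (paths invented): a child path comes back relative to base,
# anything outside base raises PathNotChild.
def _relpath_example():
    rel = relpath('/srv/repo', '/srv/repo/doc/index.txt')   # 'doc/index.txt'
    try:
        relpath('/srv/repo', '/srv/other/file')
    except errors.PathNotChild:
        pass
    return rel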
def _cicp_canonical_relpath(base, path):
    """Return the canonical path relative to base.

    Like relpath, but on case-insensitive-case-preserving file-systems, this
    will return the relpath as stored on the file-system rather than in the
    case specified in the input string, for all existing portions of the path.

    This will cause O(N) behaviour if called for every path in a tree; if you
    have a number of paths to convert, you should use canonical_relpaths().
    """
    # TODO: it should be possible to optimize this for Windows by using the
    # win32 API FindFiles function to look for the specified name - but using
    # os.listdir() still gives us the correct, platform agnostic semantics in
    # the case of a missing file.
    rel = relpath(base, path)
    # '.' will have been turned into ''
    abs_base = abspath(base)
    current = abs_base
    _listdir = os.listdir

    # use an explicit iterator so we can easily consume the rest on early exit.
    bit_iter = iter(rel.split('/'))
    for bit in bit_iter:
        lbit = bit.lower()
        try:
            next_entries = _listdir(current)
        except OSError: # enoent, eperm, etc
            # We can't find this in the filesystem, so just append the
            # remaining bits.
            current = pathjoin(current, bit, *list(bit_iter))
            break
        for look in next_entries:
            if lbit == look.lower():
                current = pathjoin(current, look)
                break
        else:
            # got to the end, nothing matched, so we just return the
            # non-existing bits as they were specified (the filename may be
            # the target of a move, for example).
            current = pathjoin(current, bit, *list(bit_iter))
            break
    return current[len(abs_base):].lstrip('/')
# XXX - TODO - we need better detection/integration of case-insensitive
# file-systems; Linux often sees FAT32 devices (or NFS-mounted OSX
# filesystems), for example, so could probably benefit from the same basic
# support there.  For now though, only Windows and OSX get that support, and
# they get it for *all* file-systems!
if sys.platform in ('win32', 'darwin'):
    canonical_relpath = _cicp_canonical_relpath
else:
    canonical_relpath = relpath


def canonical_relpaths(base, paths):
    """Create an iterable to canonicalize a sequence of relative paths.

    The intent is for this implementation to use a cache, vastly speeding
    up multiple transformations in the same directory.
    """
    # but for now, we haven't optimized...
    return [canonical_relpath(base, p) for p in paths]
def safe_unicode(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string into unicode.

    If it is unicode, it is returned.
    Otherwise it is decoded from utf-8. If decoding fails, the exception is
    wrapped in a BzrBadParameterNotUnicode exception.
    """
    if isinstance(unicode_or_utf8_string, unicode):
        return unicode_or_utf8_string
    try:
        return unicode_or_utf8_string.decode('utf8')
    except UnicodeDecodeError:
        raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
def safe_utf8(unicode_or_utf8_string):
    """Coerce unicode_or_utf8_string to a utf8 string.

    If it is a str, it is returned.
    If it is Unicode, it is encoded into a utf-8 string.
    """
    if isinstance(unicode_or_utf8_string, str):
        # TODO: jam 20070209 This is overkill, and probably has an impact on
        #       performance if we are dealing with lots of apis that want a
        #       utf-8 revision id
        try:
            # Make sure it is a valid utf-8 string
            unicode_or_utf8_string.decode('utf-8')
        except UnicodeDecodeError:
            raise errors.BzrBadParameterNotUnicode(unicode_or_utf8_string)
        return unicode_or_utf8_string
    return unicode_or_utf8_string.encode('utf-8')
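# Illustration (byte values invented), as seen from a Python 2 shell:
def _safe_coercion_examples():
    return (safe_unicode('caf\xc3\xa9'),   # u'caf\xe9'
            safe_utf8(u'caf\xe9'),         # 'caf\xc3\xa9'
            safe_utf8('caf\xc3\xa9'))      # validated and returned unchanged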
_revision_id_warning = ('Unicode revision ids were deprecated in bzr 0.15.'
                        ' Revision id generators should be creating utf8'
                        ' revision ids.')


def safe_revision_id(unicode_or_utf8_string, warn=True):
    """Revision ids should now be utf8, but at one point they were unicode.

    :param unicode_or_utf8_string: A possibly Unicode revision_id. (can also be
        utf8 or None).
    :param warn: Functions that are sanitizing user data can set warn=False
    :return: None or a utf8 revision id.
    """
    if (unicode_or_utf8_string is None
        or unicode_or_utf8_string.__class__ == str):
        return unicode_or_utf8_string
    if warn:
        symbol_versioning.warn(_revision_id_warning, DeprecationWarning,
                               stacklevel=2)
    return cache_utf8.encode(unicode_or_utf8_string)
_file_id_warning = ('Unicode file ids were deprecated in bzr 0.15. File id'
                    ' generators should be creating utf8 file ids.')


def safe_file_id(unicode_or_utf8_string, warn=True):
    """File ids should now be utf8, but at one point they were unicode.

    This is the same as safe_utf8, except it uses the cached encode functions
    to save a little bit of performance.

    :param unicode_or_utf8_string: A possibly Unicode file_id. (can also be
        utf8 or None).
    :param warn: Functions that are sanitizing user data can set warn=False
    :return: None or a utf8 file id.
    """
    if (unicode_or_utf8_string is None
        or unicode_or_utf8_string.__class__ == str):
        return unicode_or_utf8_string
    if warn:
        symbol_versioning.warn(_file_id_warning, DeprecationWarning,
                               stacklevel=2)
    return cache_utf8.encode(unicode_or_utf8_string)
_platform_normalizes_filenames = False


    normalized_filename = _inaccessible_normalized_filename
default_terminal_width = 80
"""The default terminal width for ttys.

This is defined so that higher levels can share a common fallback value when
terminal_width() returns None.
"""
def terminal_width():
    """Return terminal width.

    None is returned if the width can't be established precisely.
    """
    # If BZR_COLUMNS is set, take it, user is always right
    try:
        return int(os.environ['BZR_COLUMNS'])
    except (KeyError, ValueError):
        pass

    isatty = getattr(sys.stdout, 'isatty', None)
    if isatty is None or not isatty():
        # Don't guess, setting BZR_COLUMNS is the recommended way to override.
        return None

    if sys.platform == 'win32':
        return win32utils.get_console_size(defaultx=None)[0]

    try:
        import struct, fcntl, termios
        s = struct.pack('HHHH', 0, 0, 0, 0)
        x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
        width = struct.unpack('HHHH', x)[1]
    except (IOError, AttributeError):
        # If COLUMNS is set, take it
        try:
            return int(os.environ['COLUMNS'])
        except (KeyError, ValueError):
            return None

    if width <= 0:
        # Consider invalid values as meaning no width
        return None

    return width
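# Standalone sketch (independent of bzrlib) of the same TIOCGWINSZ query the
# fallback branch above performs; returns the column count of the tty on fd.
def _tty_columns(fd=1):
    import fcntl, struct, termios
    s = struct.pack('HHHH', 0, 0, 0, 0)
    rows, cols, _, _ = struct.unpack('HHHH',
                                     fcntl.ioctl(fd, termios.TIOCGWINSZ, s))
    return cols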
def check_legal_path(path):
    """Check whether the supplied path is legal.
    This is only required on Windows, so we don't test on other platforms
    right now.
    """
    if sys.platform != "win32":
        return
    if _validWin32PathRE.match(path) is None:
        raise errors.IllegalPath(path)
_WIN32_ERROR_DIRECTORY = 267 # Similar to errno.ENOTDIR


def _is_error_enotdir(e):
    """Check if this exception represents ENOTDIR.

    Unfortunately, python is very inconsistent about the exception
    here. The cases are:
      1) Linux, Mac OSX all versions seem to set errno == ENOTDIR
      2) Windows, Python2.4, uses errno == ERROR_DIRECTORY (267)
         which is the windows error code.
      3) Windows, Python2.5 uses errno == EINVAL and
         winerror == ERROR_DIRECTORY

    :param e: An Exception object (expected to be OSError with an errno
        attribute, but we should be able to cope with anything)
    :return: True if this represents an ENOTDIR error. False otherwise.
    """
    en = getattr(e, 'errno', None)
    if (en == errno.ENOTDIR
        or (sys.platform == 'win32'
            and (en == _WIN32_ERROR_DIRECTORY
                 or (en == errno.EINVAL
                     and getattr(e, 'winerror', None) == _WIN32_ERROR_DIRECTORY)
        ))):
        return True
    return False
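# Sketch of the intended call-site pattern (path invented): callers normalise
# the three platform behaviours documented above into one check.
def _enotdir_example():
    try:
        os.listdir('setup.py')   # a file, not a directory
    except OSError, e:
        return _is_error_enotdir(e)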
def walkdirs(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields all the data about the contents of a directory at a time.
    After each directory has been yielded, if the caller has mutated the list
    to exclude some directories, they are then not descended into.

    The data yielded is of the form:
    ((directory-relpath, directory-path-from-top),
    [(relpath, basename, kind, lstat, path-from-top), ...]),
     - directory-relpath is the relative path of the directory being returned
       with respect to top. prefix is prepended to this.
     - directory-path-from-root is the path including top for this directory.
       It is suitable for use with os functions.
     - relpath is the relative path within the subtree being walked.
     - basename is the basename of the path
       present within the tree - but it may be recorded as versioned. See
     - lstat is the stat data *if* the file was statted.
     - planned, not implemented:
       path_from_tree_root is the path from the root of the tree.

    :param prefix: Prefix the relpaths that are yielded with 'prefix'. This
        allows one to walk a subtree but get paths that are relative to a tree
        rooted higher up.
    :return: an iterator over the dirs.
    """
    #TODO there is a bit of a smell where the results of the directory-
    # summary in this, and the path from the root, may not agree
    # depending on top and prefix - i.e. ./foo and foo as a pair leads to
    # potentially confusing output. We should make this more robust - but
    # not at a speed cost. RBC 20060731
    _directory = _directory_kind
    _lstat = os.lstat
    _listdir = os.listdir
    _kind_from_mode = file_kind_from_stat_mode
    pending = [(safe_unicode(prefix), "", _directory, None, safe_unicode(top))]
    while pending:
        # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
        relroot, _, _, _, top = pending.pop()
        if relroot:
            relprefix = relroot + u'/'
        else:
            relprefix = ''
        top_slash = top + u'/'

        dirblock = []
        append = dirblock.append
        try:
            names = sorted(_listdir(top))
        except OSError, e:
            if not _is_error_enotdir(e):
                raise
        else:
            for name in names:
                abspath = top_slash + name
                statvalue = _lstat(abspath)
                kind = _kind_from_mode(statvalue.st_mode)
                append((relprefix + name, name, kind, statvalue, abspath))
        yield (relroot, top), dirblock

        # push the user specified dirs from dirblock
        pending.extend(d for d in reversed(dirblock) if d[2] == _directory)
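# Illustration (directory names invented): pruning is done by mutating the
# yielded dirblock in place before the next iteration descends.
def _walkdirs_example(top):
    seen = []
    for (dir_relpath, dir_abspath), dirblock in walkdirs(top):
        # drop control directories such as '.bzr' so they are not descended into
        dirblock[:] = [entry for entry in dirblock if entry[1] != '.bzr']
        for rel, basename, kind, statvalue, abspath in dirblock:
            seen.append((kind, rel))
    return seen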
class DirReader(object):
    """An interface for reading directories."""

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """Converts top and prefix to a starting dir entry

        :param top: A utf8 path
        :param prefix: An optional utf8 path to prefix output relative paths
            with.
        :return: A tuple starting with prefix, and ending with the native
            encoding of top.
        """
        raise NotImplementedError(self.top_prefix_to_starting_dir)

    def read_dir(self, prefix, top):
        """Read a specific dir.

        :param prefix: A utf8 prefix to be prepended to the path basenames.
        :param top: A natively encoded path to read.
        :return: A list of the directory's contents. Each item contains:
            (utf8_relpath, utf8_name, kind, lstatvalue, native_abspath)
        """
        raise NotImplementedError(self.read_dir)
_selected_dir_reader = None


def _walkdirs_utf8(top, prefix=""):
    """Yield data about all the directories in a tree.

    This yields the same information as walkdirs() only each entry is yielded
    in utf-8. On platforms which have a filesystem encoding of utf8 the paths
    are returned as exact byte-strings.

    :return: yields a tuple of (dir_info, [file_info])
        dir_info is (utf8_relpath, path-from-top)
        file_info is (utf8_relpath, utf8_name, kind, lstat, path-from-top)
        if top is an absolute path, path-from-top is also an absolute path.
        path-from-top might be unicode or utf8, but it is the correct path to
        pass to os functions to affect the file in question. (such as os.lstat)
    """
    global _selected_dir_reader
    if _selected_dir_reader is None:
        fs_encoding = _fs_enc.upper()
        if sys.platform == "win32" and win32utils.winver == 'Windows NT':
            # Win98 doesn't have unicode apis like FindFirstFileW
            # TODO: We possibly could support Win98 by falling back to the
            #       original FindFirstFile, and using TCHAR instead of WCHAR,
            #       but that gets a bit tricky, and requires custom compiling
            try:
                from bzrlib._walkdirs_win32 import Win32ReadDir
                _selected_dir_reader = Win32ReadDir()
            except ImportError:
                pass
        elif fs_encoding in ('UTF-8', 'US-ASCII', 'ANSI_X3.4-1968'):
            # ANSI_X3.4-1968 is a form of ASCII
            try:
                from bzrlib._readdir_pyx import UTF8DirReader
                _selected_dir_reader = UTF8DirReader()
            except ImportError, e:
                failed_to_load_extension(e)

    if _selected_dir_reader is None:
        # Fallback to the python version
        _selected_dir_reader = UnicodeDirReader()

    # 0 - relpath, 1- basename, 2- kind, 3- stat, 4-toppath
    # But we don't actually use 1-3 in pending, so set them to None
    pending = [[_selected_dir_reader.top_prefix_to_starting_dir(top, prefix)]]
    read_dir = _selected_dir_reader.read_dir
    _directory = _directory_kind
    while pending:
        relroot, _, _, _, top = pending[-1].pop()
        if not pending[-1]:
            pending.pop()
        dirblock = sorted(read_dir(relroot, top))
        yield (relroot, top), dirblock
        # push the user specified dirs from dirblock
        next = [d for d in reversed(dirblock) if d[2] == _directory]
        if next:
            pending.append(next)
class UnicodeDirReader(DirReader):
    """A dir reader for non-utf8 file systems, which transcodes."""

    __slots__ = ['_utf8_encode']

    def __init__(self):
        self._utf8_encode = codecs.getencoder('utf8')

    def top_prefix_to_starting_dir(self, top, prefix=""):
        """See DirReader.top_prefix_to_starting_dir."""
        return (safe_utf8(prefix), None, None, None, safe_unicode(top))

    def read_dir(self, prefix, top):
        """Read a single directory from a non-utf8 file system.

        top, and the abspath element in the output are unicode, all other paths
        are utf8. Local disk IO is done via unicode calls to listdir etc.

        This is currently the fallback code path when the filesystem encoding is
        not UTF-8. It may be better to implement an alternative so that we can
        safely handle paths that are not properly decodable in the current
        encoding.

        See DirReader.read_dir for details.
        """
        _utf8_encode = self._utf8_encode
        _lstat = os.lstat
        _listdir = os.listdir
        _kind_from_mode = file_kind_from_stat_mode

        if prefix:
            relprefix = prefix + '/'
        else:
            relprefix = ''
        top_slash = top + u'/'

        dirblock = []
        append = dirblock.append
        for name in sorted(_listdir(top)):
            try:
                name_utf8 = _utf8_encode(name)[0]
            except UnicodeDecodeError:
                raise errors.BadFilenameEncoding(
                    _utf8_encode(relprefix)[0] + name, _fs_enc)
            abspath = top_slash + name
            statvalue = _lstat(abspath)
            kind = _kind_from_mode(statvalue.st_mode)
            append((relprefix + name_utf8, name_utf8, kind, statvalue, abspath))
        return dirblock
def copy_tree(from_path, to_path, handlers={}):
    """Copy all of the entries in from_path into to_path.

    :param from_path: The base directory to copy.
    :param to_path: The target directory. If it does not exist, it will
        be created.
    :param handlers: A dictionary of functions, which takes a source and
        destinations for files, directories, etc.
_cached_user_encoding = None


def get_user_encoding(use_cache=True):
    """Find out what the preferred user encoding is.

    This is generally the encoding that is used for command line parameters
    and file contents. This may be different from the terminal encoding
    or the filesystem encoding.

    :param use_cache: Enable cache for detected encoding.
        (This parameter is turned on by default,
        and required only for selftesting)

    :return: A string defining the preferred user encoding
    """
    global _cached_user_encoding
    if _cached_user_encoding is not None and use_cache:
        return _cached_user_encoding

    if sys.platform == 'darwin':
        # python locale.getpreferredencoding() always returns
        # 'mac-roman' on darwin. That's a lie.
        sys.platform = 'posix'
        try:
            if os.environ.get('LANG', None) is None:
                # If LANG is not set, we end up with 'ascii', which is bad
                # ('mac-roman' is more than ascii), so we set a default which
                # will give us UTF-8 (which appears to work in all cases on
                # OSX). Users are still free to override LANG of course, as
                # long as it gives us something meaningful. This work-around
                # *may* not be needed with python 3k and/or OSX 10.5, but will
                # work with them too -- vila 20080908
                os.environ['LANG'] = 'en_US.UTF-8'
            user_encoding = locale.getpreferredencoding()
        finally:
            sys.platform = 'darwin'
    else:
        try:
            user_encoding = locale.getpreferredencoding()
        except locale.Error, e:
            sys.stderr.write('bzr: warning: %s\n'
                             '  doesn\'t support the locale set by $LANG (%s)\n'
                             "  Continuing with ascii encoding.\n"
                             % (e, os.environ.get('LANG')))
            user_encoding = 'ascii'

    # Windows returns 'cp0' to indicate there is no code page. So we'll just
    # treat that as ASCII, and not support printing unicode characters to the
    # console.
    #
    # For python scripts run under vim, we get '', so also treat that as ASCII
    if user_encoding in (None, 'cp0', ''):
        user_encoding = 'ascii'
    else:
        try:
            codecs.lookup(user_encoding)
        except LookupError:
            sys.stderr.write('bzr: warning:'
                             ' unknown encoding %s.'
                             ' Continuing with ascii encoding.\n'
                             % user_encoding
                            )
            user_encoding = 'ascii'

    if use_cache:
        _cached_user_encoding = user_encoding

    return user_encoding
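# Typical call-site sketch (not from the source): user-visible unicode text is
# encoded with this value instead of a hard-coded character set.
def _emit_to_user(msg):
    sys.stderr.write(msg.encode(get_user_encoding(), 'replace') + '\n')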
def get_host_name():
    """Return the current unicode host name.

    This is meant to be used in place of socket.gethostname() because that
    behaves inconsistently on different platforms.
    """
    if sys.platform == "win32":
        return win32utils.get_host_name()
    else:
        import socket
        return socket.gethostname().decode(get_user_encoding())
def recv_all(socket, bytes):
    """Receive an exact number of bytes.

    Regular Socket.recv() may return less than the requested number of bytes,
    depending on what's in the OS buffer.  MSG_WAITALL is not available
    on all platforms, but this should work everywhere.  This will return
    less than the requested amount if the remote end closes.

    This isn't optimized and is intended mostly for use in testing.
    """
    b = ''
    while len(b) < bytes:
        new = until_no_eintr(socket.recv, bytes - len(b))
        if new == '':
            break # eof
        b += new
    return b
def send_all(socket, bytes, report_activity=None):
    """Send all bytes on a socket.

    Regular socket.sendall() can give socket error 10053 on Windows.  This
    implementation sends no more than 64k at a time, which avoids this problem.

    :param report_activity: Call this as bytes are read, see
        Transport._report_activity
    """
    chunk_size = 2**16
    for pos in xrange(0, len(bytes), chunk_size):
        block = bytes[pos:pos+chunk_size]
        if report_activity is not None:
            report_activity(len(block), 'write')
        until_no_eintr(socket.sendall, block)
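# Round-trip sketch (POSIX only; payload kept well below the socket buffer size
# so a single-threaded example cannot block). Names here are invented.
def _socket_helpers_example():
    import socket as _socket
    a, b = _socket.socketpair()
    activity = []
    send_all(a, 'x' * 50000,
             report_activity=lambda n, d: activity.append((n, d)))
    data = recv_all(b, 50000)
    a.close()
    b.close()
    return len(data), activity    # (50000, [(50000, 'write')])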
def dereference_path(path):
    """Determine the real path to a file.

    All parent elements are dereferenced.  But the file itself is not
    dereferenced.
    :param path: The original path.  May be absolute or relative.
    :return: the real path *to* the file
    """
    parent, base = os.path.split(path)
    # The pathjoin for '.' is a workaround for Python bug #1213894.
    # (initial path components aren't dereferenced)
    return pathjoin(realpath(pathjoin('.', parent)), base)


def supports_mapi():
    """Return True if we can use MAPI to launch a mail client."""
    return sys.platform == "win32"
def resource_string(package, resource_name):
    """Load a resource from a package and return it as a string.

    Note: Only packages that start with bzrlib are currently supported.

    This is designed to be a lightweight implementation of resource
    loading in a way which is API compatible with the same API from
    pkg_resources. See
    http://peak.telecommunity.com/DevCenter/PkgResources#basic-resource-access.
    If and when pkg_resources becomes a standard library, this routine
    can be removed.
    """
    # Check package name is within bzrlib
    if package == "bzrlib":
        resource_relpath = resource_name
    elif package.startswith("bzrlib."):
        package = package[len("bzrlib."):].replace('.', os.sep)
        resource_relpath = pathjoin(package, resource_name)
    else:
        raise errors.BzrError('resource package %s not in bzrlib' % package)

    # Map the resource to a file and read its contents
    base = dirname(bzrlib.__file__)
    if getattr(sys, 'frozen', None):    # bzr.exe
        base = abspath(pathjoin(base, '..', '..'))
    filename = pathjoin(base, resource_relpath)
    return open(filename, 'rU').read()
def file_kind_from_stat_mode_thunk(mode):
    global file_kind_from_stat_mode
    if file_kind_from_stat_mode is file_kind_from_stat_mode_thunk:
        try:
            from bzrlib._readdir_pyx import UTF8DirReader
            file_kind_from_stat_mode = UTF8DirReader().kind_from_mode
        except ImportError, e:
            # This is one time where we won't warn that an extension failed to
            # load. The extension is never available on Windows anyway.
            from bzrlib._readdir_py import (
                _kind_from_mode as file_kind_from_stat_mode
                )
    return file_kind_from_stat_mode(mode)
file_kind_from_stat_mode = file_kind_from_stat_mode_thunk


def file_kind(f, _lstat=os.lstat):
    try:
        return file_kind_from_stat_mode(_lstat(f).st_mode)
    except OSError, e:
        if getattr(e, 'errno', None) in (errno.ENOENT, errno.ENOTDIR):
            raise errors.NoSuchFile(f)
        raise
def until_no_eintr(f, *a, **kw):
    """Run f(*a, **kw), retrying if an EINTR error occurs."""
    # Borrowed from Twisted's twisted.python.util.untilConcludes function.
    while True:
        try:
            return f(*a, **kw)
        except (IOError, OSError), e:
            if e.errno == errno.EINTR:
                continue
            raise
def re_compile_checked(re_string, flags=0, where=""):
    """Return a compiled re, or raise a sensible error.

    This should only be used when compiling user-supplied REs.

    :param re_string: Text form of regular expression.
    :param flags: eg re.IGNORECASE
    :param where: Message explaining to the user the context where
        it occurred, eg 'log search filter'.
    """
    # from https://bugs.launchpad.net/bzr/+bug/251352
    try:
        re_obj = re.compile(re_string, flags)
        re_obj.search("")
        return re_obj
    except re.error, e:
        if where:
            where = ' in ' + where
        # despite the name 'error' is a type
        raise errors.BzrCommandError('Invalid regular expression%s: %r: %s'
            % (where, re_string, e))
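# Illustration (pattern and context invented): a bad user-supplied pattern is
# reported as a BzrCommandError naming the place it came from.
def _re_compile_checked_example():
    ok = re_compile_checked(r'\bTODO\b', re.IGNORECASE, 'log search filter')
    try:
        re_compile_checked('*bad', where='log search filter')
    except errors.BzrCommandError:
        pass
    return ok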
if sys.platform == "win32":
    def getchar():
        return msvcrt.getch()
else:
    def getchar():
        fd = sys.stdin.fileno()
        settings = termios.tcgetattr(fd)
        try:
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, settings)
        return ch
if sys.platform == 'linux2':
    def _local_concurrency():
        concurrency = None
        prefix = 'processor'
        for line in file('/proc/cpuinfo', 'rb'):
            if line.startswith(prefix):
                concurrency = int(line[line.find(':')+1:]) + 1
        return concurrency
elif sys.platform == 'darwin':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.availcpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform[0:7] == 'freebsd':
    def _local_concurrency():
        return subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == 'sunos5':
    def _local_concurrency():
        return subprocess.Popen(['psrinfo', '-p',],
                                stdout=subprocess.PIPE).communicate()[0]
elif sys.platform == "win32":
    def _local_concurrency():
        # This appears to return the number of cores.
        return os.environ.get('NUMBER_OF_PROCESSORS')
else:
    def _local_concurrency():
        return None


_cached_local_concurrency = None
def local_concurrency(use_cache=True):
    """Return how many processes can be run concurrently.

    Rely on platform specific implementations and default to 1 (one) if
    anything goes wrong.
    """
    global _cached_local_concurrency

    if _cached_local_concurrency is not None and use_cache:
        return _cached_local_concurrency

    concurrency = os.environ.get('BZR_CONCURRENCY', None)
    if concurrency is None:
        try:
            concurrency = _local_concurrency()
        except (OSError, IOError):
            pass
    try:
        concurrency = int(concurrency)
    except (TypeError, ValueError):
        concurrency = 1
    if use_cache:
        _cached_local_concurrency = concurrency
    return concurrency
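# Usage sketch (environment value invented): BZR_CONCURRENCY overrides the
# platform probe, and use_cache=False forces re-detection.
def _local_concurrency_example():
    default = local_concurrency()
    os.environ['BZR_CONCURRENCY'] = '4'
    return default, local_concurrency(use_cache=False)   # (..., 4)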