103
103
from bzrlib.transport.local import LocalTransport
104
104
import bzrlib.tree
105
105
from bzrlib.progress import DummyProgress, ProgressPhase
106
from bzrlib.revision import NULL_REVISION
106
from bzrlib.revision import NULL_REVISION, CURRENT_REVISION
107
107
import bzrlib.revisiontree
108
108
from bzrlib.rio import RioReader, rio_file, Stanza
109
109
from bzrlib.symbol_versioning import (deprecated_passed,
233
233
self._set_inventory(wt._inventory, dirty=False)
234
234
self._format = wt._format
235
235
self.bzrdir = wt.bzrdir
236
from bzrlib.hashcache import HashCache
237
from bzrlib.trace import note, mutter
238
236
assert isinstance(basedir, basestring), \
239
237
"base directory %r is not a string" % basedir
240
238
basedir = safe_unicode(basedir)
268
266
# cache file, and have the parser take the most recent entry for a
269
267
# given path only.
270
268
cache_filename = self.bzrdir.get_workingtree_transport(None).local_abspath('stat-cache')
271
hc = self._hashcache = HashCache(basedir, cache_filename, self._control_files._file_mode)
269
self._hashcache = hashcache.HashCache(basedir, cache_filename,
270
self._control_files._file_mode)
273
273
# is this scan needed ? it makes things kinda slow.
469
469
def get_file_byname(self, filename):
    """Return an open read-only file object for a tree file.

    :param filename: tree-relative path of the file to open.
    :return: a file object opened in binary mode; the caller is
        responsible for closing it.
    """
    # Use the `open` builtin rather than `file`: they are equivalent in
    # Python 2, but `file` was removed in Python 3 and `open` is the
    # documented, canonical spelling.
    return open(self.abspath(filename), 'rb')
472
def annotate_iter(self, file_id):
473
"""See Tree.annotate_iter
475
This implementation will use the basis tree implementation if possible.
476
Lines not in the basis are attributed to CURRENT_REVISION
478
If there are pending merges, lines added by those merges will be
479
incorrectly attributed to CURRENT_REVISION (but after committing, the
480
attribution will be correct).
482
basis = self.basis_tree()
483
changes = self._iter_changes(basis, True, [file_id]).next()
484
changed_content, kind = changes[2], changes[6]
485
if not changed_content:
486
return basis.annotate_iter(file_id)
490
if kind[0] != 'file':
493
old_lines = list(basis.annotate_iter(file_id))
495
for tree in self.branch.repository.revision_trees(
496
self.get_parent_ids()[1:]):
497
if file_id not in tree:
499
old.append(list(tree.annotate_iter(file_id)))
500
return annotate.reannotate(old, self.get_file(file_id).readlines(),
472
503
def get_parent_ids(self):
473
504
"""See Tree.get_parent_ids.
555
586
return os.path.getsize(self.id2abspath(file_id))
558
def get_file_sha1(self, file_id, path=None):
589
def get_file_sha1(self, file_id, path=None, stat_value=None):
560
591
path = self._inventory.id2path(file_id)
561
return self._hashcache.get_sha1(path)
592
return self._hashcache.get_sha1(path, stat_value)
563
594
def get_file_mtime(self, file_id, path=None):
1223
1254
subp = pathjoin(path, subf)
1226
def _translate_ignore_rule(self, rule):
1227
"""Translate a single ignore rule to a regex.
1229
There are two types of ignore rules. Those that do not contain a / are
1230
matched against the tail of the filename (that is, they do not care
1231
what directory the file is in.) Rules which do contain a slash must
1232
match the entire path. As a special case, './' at the start of the
1233
string counts as a slash in the string but is removed before matching
1234
(e.g. ./foo.c, ./src/foo.c)
1236
:return: The translated regex.
1238
if rule[:2] in ('./', '.\\'):
1240
result = fnmatch.translate(rule[2:])
1241
elif '/' in rule or '\\' in rule:
1243
result = fnmatch.translate(rule)
1245
# default rule style.
1246
result = "(?:.*/)?(?!.*/)" + fnmatch.translate(rule)
1247
assert result[-1] == '$', "fnmatch.translate did not add the expected $"
1248
return "(" + result + ")"
1250
def _combine_ignore_rules(self, rules):
1251
"""Combine a list of ignore rules into a single regex object.
1253
Each individual rule is combined with | to form a big regex, which then
1254
has $ added to it to form something like ()|()|()$. The group index for
1255
each subregex's outermost group is placed in a dictionary mapping back
1256
to the rule. This allows quick identification of the matching rule that
1258
:return: a list of the compiled regex and the matching-group index
1259
dictionaries. We return a list because python complains if you try to
1260
combine more than 100 regexes.
1265
translated_rules = []
1267
translated_rule = self._translate_ignore_rule(rule)
1268
compiled_rule = re.compile(translated_rule)
1269
groups[next_group] = rule
1270
next_group += compiled_rule.groups
1271
translated_rules.append(translated_rule)
1272
if next_group == 99:
1273
result.append((re.compile("|".join(translated_rules)), groups))
1276
translated_rules = []
1277
if len(translated_rules):
1278
result.append((re.compile("|".join(translated_rules)), groups))
1281
1258
def ignored_files(self):
1282
1259
"""Yield list of PATH, IGNORE_PATTERN"""
1297
1274
ignore_globs = set(bzrlib.DEFAULT_IGNORE)
1298
1275
ignore_globs.update(ignores.get_runtime_ignores())
1300
1276
ignore_globs.update(ignores.get_user_ignores())
1302
1277
if self.has_filename(bzrlib.IGNORE_FILENAME):
1303
1278
f = self.get_file_byname(bzrlib.IGNORE_FILENAME)
1305
1280
ignore_globs.update(ignores.parse_ignore_file(f))
1309
1283
self._ignoreset = ignore_globs
1310
self._ignore_regex = self._combine_ignore_rules(ignore_globs)
1311
1284
return ignore_globs
1313
def _get_ignore_rules_as_regex(self):
1314
"""Return a regex of the ignore rules and a mapping dict.
1316
:return: (ignore rules compiled regex, dictionary mapping rule group
1317
indices to original rule.)
1319
if getattr(self, '_ignoreset', None) is None:
1320
self.get_ignore_list()
1321
return self._ignore_regex
1286
def _flush_ignore_list_cache(self):
1287
"""Resets the cached ignore list to force a cache rebuild."""
1288
self._ignoreset = None
1289
self._ignoreglobster = None
1323
1291
def is_ignored(self, filename):
1324
1292
r"""Check whether the filename matches an ignore pattern.
1329
1297
If the file is ignored, returns the pattern which caused it to
1330
1298
be ignored, otherwise None. So this can simply be used as a
1331
1299
boolean if desired."""
1333
# TODO: Use '**' to match directories, and other extended
1334
# globbing stuff from cvs/rsync.
1336
# XXX: fnmatch is actually not quite what we want: it's only
1337
# approximately the same as real Unix fnmatch, and doesn't
1338
# treat dotfiles correctly and allows * to match /.
1339
# Eventually it should be replaced with something more
1342
rules = self._get_ignore_rules_as_regex()
1343
for regex, mapping in rules:
1344
match = regex.match(filename)
1345
if match is not None:
1346
# one or more of the groups in mapping will have a non-None
1348
groups = match.groups()
1349
rules = [mapping[group] for group in
1350
mapping if groups[group] is not None]
1300
if getattr(self, '_ignoreglobster', None) is None:
1301
self._ignoreglobster = globbing.Globster(self.get_ignore_list())
1302
return self._ignoreglobster.match(filename)
1354
1304
def kind(self, file_id):
    """Return the on-disk kind of the file identified by file_id."""
    # Resolve the file id to an absolute path, then inspect it on disk.
    abspath = self.id2abspath(file_id)
    return file_kind(abspath)
1307
def _comparison_data(self, entry, path):
1308
abspath = self.abspath(path)
1310
stat_value = os.lstat(abspath)
1312
if getattr(e, 'errno', None) == errno.ENOENT:
1319
mode = stat_value.st_mode
1320
kind = osutils.file_kind_from_stat_mode(mode)
1321
if not supports_executable():
1322
executable = entry.executable
1324
executable = bool(stat.S_ISREG(mode) and stat.S_IEXEC & mode)
1325
return kind, executable, stat_value
1327
def _file_size(self, entry, stat_value):
1328
return stat_value.st_size
1357
1330
def last_revision(self):
1358
1331
"""Return the last revision of the branch for this tree.
1726
1699
this_tree=self)
1702
def _write_hashcache_if_dirty(self):
1703
"""Write out the hashcache if it is dirty."""
1704
if self._hashcache.needs_write:
1706
self._hashcache.write()
1708
if e.errno not in (errno.EPERM, errno.EACCES):
1710
# TODO: jam 20061219 Should this be a warning? A single line
1711
# warning might be sufficient to let the user know what
1713
mutter('Could not write hashcache for %s\nError: %s',
1714
self._hashcache.cache_file_name(), e)
1729
1716
@needs_tree_write_lock
1730
1717
def _write_inventory(self, inv):
1731
1718
"""Write inventory as the current inventory."""
1792
1779
# _inventory_is_modified is always False during a read lock.
1793
1780
if self._inventory_is_modified:
1795
if self._hashcache.needs_write:
1796
self._hashcache.write()
1782
self._write_hashcache_if_dirty()
1797
1784
# reverse order of locking.
1799
1786
return self._control_files.unlock()
1861
1848
# _inventory_is_modified is always False during a read lock.
1862
1849
if self._inventory_is_modified:
1864
if self._hashcache.needs_write:
1865
self._hashcache.write()
1851
self._write_hashcache_if_dirty()
1866
1852
# reverse order of locking.
1868
1854
return self._control_files.unlock()