# Copyright (C) 2005, 2006 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# TODO: Up-front, stat all files in order and remove those which are deleted or
# out-of-date. Don't actually re-read them until they're needed. That ought
# to bring all the inodes into core so that future stats to them are fast, and
# it preserves the nice property that any caller will always get up-to-date
# data except in unavoidable cases.
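
# Illustrative usage sketch (an editorial addition, not part of the original
# file); the tree root and cache path below are hypothetical:
#
#     hc = HashCache(u'/path/to/tree', u'/path/to/tree/.bzr/stat-cache')
#     hc.read()                     # load previously cached fingerprints
#     sha1 = hc.get_sha1('README')  # a cache hit avoids re-reading content
#     if hc.needs_write:
#         hc.write()                # persist the cache atomically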

CACHE_HEADER = "### bzr hashcache v5\n"


import os
import stat
import time

from bzrlib import (
    atomicfile,
    errors,
    filters as _mod_filters,
    osutils,
    trace,
    )


FP_MTIME_COLUMN = 1
FP_CTIME_COLUMN = 2
FP_MODE_COLUMN = 5
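
# For illustration (assumed values): _fingerprint() below returns a 6-tuple
# of stat data, (size, mtime, ctime, ino, dev, mode), which the column
# constants above index into:
#
#     fp = (1024, 1130000000L, 1130000000L, 12345, 2049, 0100644)
#     fp[FP_MTIME_COLUMN]  # -> 1130000000L
#     fp[FP_CTIME_COLUMN]  # -> 1130000000L
#     fp[FP_MODE_COLUMN]   # -> 0100644, as tested with stat.S_ISREG()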


class HashCache(object):
    """Cache for looking up file SHA-1.

    hit_count
        number of times files have been retrieved from the cache, avoiding a
        re-read

    miss_count
        number of misses (times files have been completely re-read)
    """
    needs_write = False

    def __init__(self, root, cache_file_name, mode=None,
                 content_filter_stack_provider=None):
        """Create a hash cache in base dir, and set the file mode to mode.

        :param content_filter_stack_provider: a function that takes a
            path (relative to the top of the tree) and a file-id as
            parameters and returns a stack of ContentFilters.
            If None, no content filtering is performed.
        """
        self.root = osutils.safe_unicode(root)
        self.root_utf8 = self.root.encode('utf8') # where is the filesystem encoding ?
        self.hit_count = 0
        self.miss_count = 0
        self.stat_count = 0
        self.danger_count = 0
        self.removed_count = 0
        self.update_count = 0
        self._cache = {}
        self._mode = mode
        self._cache_file_name = osutils.safe_unicode(cache_file_name)
        self._filter_provider = content_filter_stack_provider

    def cache_file_name(self):
        return self._cache_file_name

    def clear(self):
        """Discard all cached information.

        This does not reset the counters."""
        if self._cache:
            self.needs_write = True
            self._cache = {}

    def scan(self):
        """Scan all files and remove entries where the cache entry is obsolete.

        Obsolete entries are those where the file has been modified or deleted
        since the entry was inserted.
        """
        # FIXME optimisation opportunity, on linux [and check other oses]:
        # rather than iteritems order, stat in inode order.
        prep = [(ce[1][3], path, ce) for (path, ce) in self._cache.iteritems()]
        prep.sort()

        for inum, path, cache_entry in prep:
            abspath = osutils.pathjoin(self.root, path)
            fp = self._fingerprint(abspath)
            self.stat_count += 1

            cache_fp = cache_entry[1]

            if (not fp) or (cache_fp != fp):
                # not here or not a regular file anymore
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]

    def get_sha1(self, path, stat_value=None):
        """Return the sha1 of a file."""
        if path.__class__ is str:
            abspath = osutils.pathjoin(self.root_utf8, path)
        else:
            abspath = osutils.pathjoin(self.root, path)
        self.stat_count += 1
        file_fp = self._fingerprint(abspath, stat_value)

        if not file_fp:
            # not a regular file or not existing
            if path in self._cache:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
            return None

        if path in self._cache:
            cache_sha1, cache_fp = self._cache[path]
        else:
            cache_sha1, cache_fp = None, None

        if cache_fp == file_fp:
            ## mutter("hashcache hit for %s %r -> %s", path, file_fp, cache_sha1)
            ## mutter("now = %s", time.time())
            self.hit_count += 1
            return cache_sha1
        else:
            self.miss_count += 1

            mode = file_fp[FP_MODE_COLUMN]
            if stat.S_ISREG(mode):
                if self._filter_provider is None:
                    filters = []
                else:
                    filters = self._filter_provider(path=path, file_id=None)
                digest = self._really_sha1_file(abspath, filters)
            elif stat.S_ISLNK(mode):
                target = osutils.readlink(osutils.safe_unicode(abspath))
                digest = osutils.sha_string(target.encode('UTF-8'))
            else:
                raise errors.BzrError("file %r: unknown file stat mode: %o"
                                      % (abspath, mode))

            # window of 3 seconds to allow for 2s resolution on windows,
            # unsynchronized file servers, etc.
            cutoff = self._cutoff_time()
            if file_fp[FP_MTIME_COLUMN] >= cutoff \
                    or file_fp[FP_CTIME_COLUMN] >= cutoff:
                # changed too recently; can't be cached. we can
                # return the result and it could possibly be cached
                # next time.
                #
                # the point is that we only want to cache when we are sure that any
                # subsequent modifications of the file can be detected. If a
                # modification neither changes the inode, the device, the size, nor
                # the mode, then we can only distinguish it by time; therefore we
                # need to let sufficient time elapse before we may cache this entry
                # again. If we didn't do this, then, for example, a very quick 1
                # byte replacement in the file might go undetected.
                ## mutter('%r modified too recently; not caching', path)
                self.danger_count += 1
                if path in self._cache:
                    self.removed_count += 1
                    self.needs_write = True
                    del self._cache[path]
            else:
                ## mutter('%r added to cache: now=%f, mtime=%d, ctime=%d',
                ##        path, time.time(), file_fp[FP_MTIME_COLUMN],
                ##        file_fp[FP_CTIME_COLUMN])
                self.update_count += 1
                self.needs_write = True
                self._cache[path] = (digest, file_fp)
            return digest

    def _really_sha1_file(self, abspath, filters):
        """Calculate the SHA1 of a file by reading the full text"""
        return _mod_filters.internal_size_sha_file_byname(abspath, filters)[1]
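
    # Illustrative sketch (hypothetical provider, an editorial addition):
    # a content_filter_stack_provider is any callable accepting keyword
    # arguments 'path' and 'file_id' and returning the stack of
    # ContentFilters to apply before hashing, e.g.
    #
    #     def no_filters(path, file_id):
    #         return []
    #
    #     hc = HashCache(root, cache_name,
    #                    content_filter_stack_provider=no_filters)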

    def write(self):
        """Write contents of cache to file."""
        outf = atomicfile.AtomicFile(self.cache_file_name(), 'wb',
                                     new_mode=self._mode)
        try:
            outf.write(CACHE_HEADER)

            for path, c in self._cache.iteritems():
                line_info = [path.encode('utf-8'), '// ', c[0], ' ']
                line_info.append(' '.join([str(fld) for fld in c[1]]))
                line_info.append('\n')
                outf.write(''.join(line_info))
            outf.commit()
            self.needs_write = False
            ## mutter("write hash cache: %s hits=%d misses=%d stat=%d recent=%d updates=%d",
            ##        self.cache_file_name(), self.hit_count, self.miss_count,
            ##        self.stat_count,
            ##        self.danger_count, self.update_count)
        finally:
            outf.close()
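
    # The on-disk format written above and parsed by read() below, shown
    # here with made-up values: a header line, then one record per path,
    # with '// ' separating the path from the sha1 and the six fingerprint
    # fields (size mtime ctime ino dev mode):
    #
    #     ### bzr hashcache v5
    #     README// da39a3ee5e6b4b0d3255bfef95601890afd80709 10 1130000000 1130000000 12345 2049 33188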

    def read(self):
        """Reinstate cache from file.

        Overwrites existing cache.

        If the cache file has the wrong version marker, this just clears
        the cache."""
        self._cache = {}

        fn = self.cache_file_name()
        try:
            inf = file(fn, 'rb', buffering=65000)
        except IOError, e:
            trace.mutter("failed to open %s: %s", fn, e)
            # better write it now so it is valid
            self.needs_write = True
            return

        hdr = inf.readline()
        if hdr != CACHE_HEADER:
            trace.mutter('cache header marker not found at top of %s;'
                         ' discarding cache', fn)
            self.needs_write = True
            return

        for l in inf:
            pos = l.index('// ')
            path = l[:pos].decode('utf-8')
            if path in self._cache:
                trace.warning('duplicated path %r in cache' % path)
                continue

            pos += 3
            fields = l[pos:].split(' ')
            if len(fields) != 7:
                trace.warning("bad line in hashcache: %r" % l)
                continue

            sha1 = fields[0]
            if len(sha1) != 40:
                trace.warning("bad sha1 in hashcache: %r" % sha1)
                continue

            fp = tuple(map(long, fields[1:]))

            self._cache[path] = (sha1, fp)

        self.needs_write = False

    def _cutoff_time(self):
        """Return cutoff time.

        Files modified more recently than this time are at risk of being
        undetectably modified and so can't be cached.
        """
        return int(time.time()) - 3
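
    # Worked example (times are made up): with time.time() == 1000000000,
    # _cutoff_time() returns 999999997, so a file whose mtime or ctime is
    # 999999997 or later was modified within the 3-second window and its
    # sha1 is returned but not cached by get_sha1() above.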

    def _fingerprint(self, abspath, stat_value=None):
        if stat_value is None:
            try:
                stat_value = os.lstat(abspath)
            except OSError:
                # might be missing, etc
                return None

        if stat.S_ISDIR(stat_value.st_mode):
            return None

        # we discard any high precision because it's not reliable; perhaps we
        # could do better on some systems?
        return (stat_value.st_size, long(stat_value.st_mtime),
                long(stat_value.st_ctime), stat_value.st_ino,
                stat_value.st_dev, stat_value.st_mode)