# (C) 2005 Canonical Ltd

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

# TODO: Up-front, stat all files in order and remove those which are deleted or
# out-of-date.  Don't actually re-read them until they're needed.  That ought
# to bring all the inodes into core so that future stats to them are fast, and
# it preserves the nice property that any caller will always get up-to-date
# data except in unavoidable cases.

# TODO: Perhaps return more details on the file to avoid statting it
# again: nonexistent, file type, size, etc

# TODO: Perhaps use a Python pickle instead of a text file; might be faster.


CACHE_HEADER = "### bzr hashcache v5\n"

import os, stat, time

from bzrlib.osutils import sha_file
from bzrlib.trace import mutter, warning
from bzrlib.atomicfile import AtomicFile


def _fingerprint(abspath):
    try:
        fs = os.lstat(abspath)
    except OSError:
        # might be missing, etc
        return None

    if stat.S_ISDIR(fs.st_mode):
        return None

    # we discard any high precision because it's not reliable; perhaps we
    # could do better on some systems?
    return (fs.st_size, long(fs.st_mtime),
            long(fs.st_ctime), fs.st_ino, fs.st_dev)
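
# The fingerprint returned above is the 5-tuple
# (size, mtime, ctime, inode, device).  An illustrative value for a small
# file (the exact numbers are, of course, system-dependent):
#
#   (10, 1118587934, 1118587934, 327693, 2049)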


class HashCache(object):
    """Cache for looking up file SHA-1.

    Files are considered to match the cached value if the fingerprint
    of the file has not changed.  This includes its mtime, ctime,
    device number, inode number, and size.  This should catch
    modifications or replacement of the file by a new one.

    This may not catch modifications that do not change the file's
    size and that occur within the resolution window of the
    timestamps.  To handle this we specifically do not cache files
    which have changed since the start of the present second, since
    they could undetectably change again.

    This scheme may fail if the machine's clock steps backwards.
    Don't do that.

    This does not canonicalize the paths passed in; that should be
    done by the caller.

    _cache
        Indexed by path, points to a two-tuple of the SHA-1 of the file
        and its fingerprint.

    stat_count
        number of times files have been statted

    hit_count
        number of times files have been retrieved from the cache, avoiding a
        re-read

    miss_count
        number of misses (times files have been completely re-read)
    """
    needs_write = False
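
    # Illustrative shape of a _cache entry (the path and numbers are made
    # up; the sha1 shown happens to be that of an empty file):
    #
    #   u'README': ('da39a3ee5e6b4b0d3255bfef95601890afd80709',
    #               (0, 1118587934, 1118587934, 327693, 2049))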

    def __init__(self, basedir):
        self.basedir = basedir
        self.hit_count = 0
        self.miss_count = 0
        self.stat_count = 0
        self.danger_count = 0
        self.removed_count = 0
        self.update_count = 0
        self._cache = {}

    def cache_file_name(self):
        return os.sep.join([self.basedir, '.bzr', 'stat-cache'])

    def clear(self):
        """Discard all cached information.

        This does not reset the counters."""
        if self._cache:
            self.needs_write = True
            self._cache = {}

    def scan(self):
        """Scan all files and remove entries where the cache entry is obsolete.

        Obsolete entries are those where the file has been modified or deleted
        since the entry was inserted.
        """
        # ce[1][3] is the inode number from the fingerprint, so sorting on
        # it stats the files in inode order, which tends to be faster than
        # statting in path order.
        prep = [(ce[1][3], path, ce) for (path, ce) in self._cache.iteritems()]
        prep.sort()

        for inum, path, cache_entry in prep:
            abspath = os.sep.join([self.basedir, path])
            fp = _fingerprint(abspath)
            self.stat_count += 1

            cache_fp = cache_entry[1]

            if (not fp) or (cache_fp != fp):
                # not here or not a regular file anymore
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]

    def get_sha1(self, path):
        """Return the sha1 of a file."""
        abspath = os.sep.join([self.basedir, path])
        self.stat_count += 1
        file_fp = _fingerprint(abspath)

        if not file_fp:
            # not a regular file or not existing
            if path in self._cache:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
            return None

        if path in self._cache:
            cache_sha1, cache_fp = self._cache[path]
        else:
            cache_sha1, cache_fp = None, None

        if cache_fp == file_fp:
            self.hit_count += 1
            return cache_sha1

        self.miss_count += 1
        digest = sha_file(file(abspath, 'rb', buffering=65000))

        now = int(time.time())
        if file_fp[1] >= now or file_fp[2] >= now:
            # changed too recently; can't be cached.  we can
            # return the result and it could possibly be cached
            # next time.
            self.danger_count += 1
            if cache_fp:
                self.removed_count += 1
                self.needs_write = True
                del self._cache[path]
        else:
            self.update_count += 1
            self.needs_write = True
            self._cache[path] = (digest, file_fp)

        return digest

    def write(self):
        """Write contents of cache to file."""
        outf = AtomicFile(self.cache_file_name(), 'wb')
        try:
            print >>outf, CACHE_HEADER,

            for path, c in self._cache.iteritems():
                assert '//' not in path, path
                outf.write(path.encode('utf-8'))
                outf.write('// ')
                print >>outf, c[0],         # hex sha1
                for fld in c[1]:
                    print >>outf, "%d" % fld,
                print >>outf

            outf.commit()
            self.needs_write = False
        finally:
            if not outf.closed:
                outf.abort()
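
    # The stat-cache written above is plain text: the CACHE_HEADER line,
    # then one line per entry of the form
    #
    #   <utf-8 path>// <sha1 hex> <size> <mtime> <ctime> <ino> <dev>
    #
    # read() below relies on there being exactly six space-separated
    # fields after the '// ' separator.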

    def read(self):
        """Reinstate cache from file.

        Overwrites existing cache.

        If the cache file has the wrong version marker, this just clears
        the cache."""
        self._cache = {}

        fn = self.cache_file_name()
        try:
            inf = file(fn, 'rb', buffering=65000)
        except IOError, e:
            mutter("failed to open %s: %s" % (fn, e))
            # better write it now so it is valid
            self.needs_write = True
            return

        hdr = inf.readline()
        if hdr != CACHE_HEADER:
            mutter('cache header marker not found at top of %s; discarding cache'
                   % fn)
            self.needs_write = True
            return

        for l in inf:
            pos = l.index('// ')
            path = l[:pos].decode('utf-8')
            if path in self._cache:
                warning('duplicated path %r in cache' % path)
                continue

            pos += 3
            fields = l[pos:].split(' ')
            if len(fields) != 6:
                warning("bad line in hashcache: %r" % l)
                continue

            sha1 = fields[0]
            if len(sha1) != 40:
                warning("bad sha1 in hashcache: %r" % sha1)
                continue

            fp = tuple(map(long, fields[1:]))

            self._cache[path] = (sha1, fp)

        self.needs_write = False
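

# Example usage (a minimal sketch; the tree path is hypothetical, and the
# directory must already contain a .bzr subdirectory for write() to succeed):
#
#   hc = HashCache('/path/to/worktree')
#   hc.read()                     # load .bzr/stat-cache if it exists
#   s = hc.get_sha1('README')     # sha1 hex digest, cached when safe
#   hc.scan()                     # drop entries for changed/deleted files
#   if hc.needs_write:
#       hc.write()                # rewritten atomically via AtomicFile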