"""A simple least-recently-used (LRU) cache."""

from __future__ import absolute_import, division
# Sentinel used as the "next" pointer terminator in the linked list, so that
# None remains usable as a real cache key.
_null_key = object()


class _LRUNode(object):
    """This maintains the linked-list which is the lru internals.

    Nodes hold a direct reference to their predecessor (``prev``) but only
    the *key* of their successor (``next_key``); ``_null_key`` marks the tail.
    """

    __slots__ = ('prev', 'next_key', 'key', 'value', 'cleanup', 'size')

    def __init__(self, key, value, cleanup=None):
        self.prev = None
        self.next_key = _null_key
        self.key = key
        self.value = value
        self.cleanup = cleanup
        # TODO: We could compute this 'on-the-fly' like we used to, and remove
        #       one pointer from this object, we just need to decide if it
        #       actually costs us much of anything in normal usage
        self.size = None

    def __repr__(self):
        if self.prev is None:
            prev_key = None
        else:
            prev_key = self.prev.key
        return '%s(%r n:%r p:%r)' % (self.__class__.__name__, self.key,
                                     self.next_key, prev_key)

    def run_cleanup(self):
        """Invoke the cleanup callback (if any), then drop our references."""
        try:
            if self.cleanup is not None:
                self.cleanup(self.key, self.value)
        finally:
            # cleanup might raise an exception, but we want to make sure
            # to break refcycles, etc
            self.cleanup = None
            self.value = None


class LRUCache(object):
    """A class which manages a cache of entries, removing unused ones."""

    def __init__(self, max_cache=100, after_cleanup_count=None):
        """Create a new LRUCache.

        :param max_cache: The maximum number of entries to hold before a
            cleanup is triggered.
        :param after_cleanup_count: After a cleanup, shrink the cache to this
            many entries. Defaults to 80% of max_cache.
        """
        self._cache = {}
        # The "HEAD" of the lru linked list
        self._most_recently_used = None
        # The "TAIL" of the lru linked list
        self._least_recently_used = None
        self._update_max_cache(max_cache, after_cleanup_count)

    def __contains__(self, key):
        return key in self._cache

    def __getitem__(self, key):
        node = self._cache[key]
        # A successful read counts as a use, so move the node to the front.
        self._record_access(node)
        return node.value

    def __len__(self):
        return len(self._cache)

    def _walk_lru(self):
        """Walk the LRU list, only meant to be used in tests."""
        node = self._most_recently_used
        if node is not None:
            if node.prev is not None:
                raise AssertionError('the _most_recently_used entry is not'
                                     ' supposed to have a previous entry'
                                     ' %s' % (node,))
        while node is not None:
            if node.next_key is _null_key:
                if node is not self._least_recently_used:
                    raise AssertionError('only the last node should have'
                                         ' no next value: %s' % (node,))
                node_next = None
            else:
                node_next = self._cache[node.next_key]
                if node_next.prev is not node:
                    raise AssertionError('inconsistency found, node.next.prev'
                                         ' != node: %s' % (node,))
            if node.prev is None:
                if node is not self._most_recently_used:
                    raise AssertionError('only the _most_recently_used should'
                                         ' not have a previous node: %s'
                                         % (node,))
            else:
                if node.prev.next_key != node.key:
                    raise AssertionError('inconsistency found, node.prev.next'
                                         ' != node: %s' % (node,))
            yield node
            node = node_next

    def add(self, key, value, cleanup=None):
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        :param key: The key to store it under
        :param value: The object to store
        :param cleanup: None or a function taking (key, value) to indicate
                        'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        if key in self._cache:
            node = self._cache[key]
            try:
                node.run_cleanup()
            finally:
                # Maintain the LRU properties, even if cleanup raises an
                # exception
                node.value = value
                node.cleanup = cleanup
                self._record_access(node)
        else:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
            self._record_access(node)

        if len(self._cache) > self._max_cache:
            # Trigger the cleanup
            self.cleanup()

    def get(self, key, default=None):
        """Return the cached value for key, recording the access.

        :param default: Returned (without recording an access) when key is
            not cached.
        """
        node = self._cache.get(key, None)
        if node is None:
            return default
        self._record_access(node)
        return node.value

    def keys(self):
        """Get the list of keys currently cached.

        Note that values returned here may not be available by the time you
        request them later on. They should be treated as a hint, and not a
        guarantee that they will still be in the cache.

        :return: An unordered list of keys that are currently cached.
        """
        return list(self._cache.keys())

    def items(self):
        """Get the key:value pairs as a dict."""
        # .items() instead of .iteritems(): works on both Python 2 and 3.
        return dict((k, n.value) for k, n in self._cache.items())

    def cleanup(self):
        """Clear the cache until it shrinks to the requested size.

        This does not completely wipe the cache, just makes sure it is under
        the after_cleanup_count.
        """
        # Make sure the cache is shrunk to the correct size
        while len(self._cache) > self._after_cleanup_count:
            self._remove_lru()

    def __setitem__(self, key, value):
        """Add a value to the cache, there will be no cleanup function."""
        self.add(key, value, cleanup=None)

    def _record_access(self, node):
        """Record that key was accessed."""
        # Move 'node' to the front of the queue
        if self._most_recently_used is None:
            # First entry: it is both head and tail of the list.
            self._most_recently_used = node
            self._least_recently_used = node
        elif node is self._most_recently_used:
            # Nothing to do, this node is already at the head of the queue
            pass
        else:
            # Unlink the node from its current position...
            if node is self._least_recently_used:
                self._least_recently_used = node.prev
            if node.prev is not None:
                node.prev.next_key = node.next_key
            if node.next_key is not _null_key:
                node_next = self._cache[node.next_key]
                node_next.prev = node.prev
            # ...and insert it at the front of the list.
            node.next_key = self._most_recently_used.key
            self._most_recently_used.prev = node
            self._most_recently_used = node
            node.prev = None

    def _remove_node(self, node):
        """Drop node from the cache dict and unlink it from the LRU list."""
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        self._cache.pop(node.key)
        # If we have removed all entries, remove the head pointer as well
        if self._least_recently_used is None:
            self._most_recently_used = None
        try:
            node.run_cleanup()
        finally:
            # cleanup might raise an exception, but we want to make sure to
            # maintain the linked list
            if node.prev is not None:
                node.prev.next_key = node.next_key
            if node.next_key is not _null_key:
                node_next = self._cache[node.next_key]
                node_next.prev = node.prev
            # And remove this node's pointers
            node.prev = None
            node.next_key = _null_key

    def _remove_lru(self):
        """Remove one entry from the lru, and handle consequences.

        If there are no more references to the lru, then this entry should be
        removed from the cache.
        """
        self._remove_node(self._least_recently_used)

    def clear(self):
        """Clear out all of the cache."""
        # Clean up in LRU order
        while self._cache:
            self._remove_lru()

    def resize(self, max_cache, after_cleanup_count=None):
        """Change the number of entries that will be cached."""
        self._update_max_cache(max_cache,
                               after_cleanup_count=after_cleanup_count)

    def _update_max_cache(self, max_cache, after_cleanup_count=None):
        self._max_cache = max_cache
        if after_cleanup_count is None:
            # Floor division: with "from __future__ import division" in
            # effect, a plain "/" would make the threshold a float.
            self._after_cleanup_count = self._max_cache * 8 // 10
        else:
            self._after_cleanup_count = min(after_cleanup_count,
                                            self._max_cache)
        self.cleanup()
class LRUSizeCache(LRUCache):
    """An LRUCache that removes things based on the size of the values.

    This differs in that it doesn't care how many actual items there are,
    it just restricts the cache to be cleaned up after so much data is
    stored.

    The size of items added will be computed using compute_size(value),
    which defaults to len() if not supplied.
    """

    def __init__(self, max_size=1024 * 1024, after_cleanup_size=None,
                 compute_size=None):
        """Create a new LRUSizeCache.

        :param max_size: The max number of bytes to store before we start
            clearing out entries.
        :param after_cleanup_size: After cleaning up, shrink everything to
            this size. Defaults to 80% of max_size.
        :param compute_size: A function to compute the size of the values. We
            use a function here, so that you can pass 'len' if you are just
            using simple strings, or a more complex function if you are using
            something like a list of strings, or even a custom object.
            The function should take the value, and return the size of that
            value. It defaults to len() if not supplied.
        """
        self._value_size = 0
        if compute_size is None:
            self._compute_size = len
        else:
            self._compute_size = compute_size
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        # Floor division: with "from __future__ import division" a plain "/"
        # would produce a float entry count for int().
        LRUCache.__init__(self, max_cache=max(int(max_size // 512), 1))

    def add(self, key, value, cleanup=None):
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        :param key: The key to store it under
        :param value: The object to store
        :param cleanup: None or a function taking (key, value) to indicate
                        'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError('cannot use _null_key as a key')
        node = self._cache.get(key, None)
        value_len = self._compute_size(value)
        if value_len >= self._after_cleanup_size:
            # The new value is 'too big to fit', as it would fill up/overflow
            # the cache all by itself
            if node is not None:
                # We won't be replacing the old node, so just remove it
                self._remove_node(node)
            if cleanup is not None:
                # The value will never be stored, so clean it up immediately.
                cleanup(key, value)
            return
        if node is None:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
        else:
            # Replacing an existing entry: retire the old value (running its
            # cleanup, matching LRUCache.add) and adjust the size total.
            self._value_size -= node.size
            try:
                node.run_cleanup()
            finally:
                node.value = value
                node.cleanup = cleanup
        node.size = value_len
        self._value_size += value_len
        self._record_access(node)

        if self._value_size > self._max_size:
            # Time to cleanup
            self.cleanup()

    def cleanup(self):
        """Clear the cache until it shrinks to the requested size.

        This does not completely wipe the cache, just makes sure it is under
        the after_cleanup_size.
        """
        while self._value_size > self._after_cleanup_size:
            self._remove_lru()

    def _remove_node(self, node):
        # Keep the running byte total in sync as nodes are removed.
        self._value_size -= node.size
        LRUCache._remove_node(self, node)

    def resize(self, max_size, after_cleanup_size=None):
        """Change the number of bytes that will be cached."""
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        max_cache = max(int(max_size // 512), 1)
        self._update_max_cache(max_cache)

    def _update_max_size(self, max_size, after_cleanup_size=None):
        self._max_size = max_size
        if after_cleanup_size is None:
            # Floor division, for the same reason as in _update_max_cache.
            self._after_cleanup_size = self._max_size * 8 // 10
        else:
            self._after_cleanup_size = min(after_cleanup_size,
                                           self._max_size)