// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * They use a hash of the data as the key, and the value identifies the block
 * holding that data. Hashes of different data may collide, which is why keys
 * need not be unique, but the user-provided value always uniquely identifies
 * a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
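/*
 * Illustrative example (not from this file): two xattr blocks with different
 * contents can hash to the same key, so a lookup by key may return several
 * entries, while the value (here a block number) stays unique per entry:
 *
 *	key 0x1234 / value 100	- xattr block at disk block 100
 *	key 0x1234 / value 205	- different data, same hash, block 205
 */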
struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	/* LRU list of entries - reclaim starts from the head */
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);
static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}
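/*
 * Worked example (illustrative): for c_bucket_bits = 10,
 *
 *	head = &cache->c_hash[hash_32(key, 10)];
 *
 * selects one of 2^10 = 1024 bucket heads; hash_32() mixes the whole 32-bit
 * key so related keys spread across buckets and chains stay short as long as
 * the entry count is bounded by c_max_entries above.
 */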
/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64
/*
 * mb_cache_entry_create - create entry in cache
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @value: value of the entry
 * @reusable: is the entry reusable by others?
 *
 * Creates entry in @cache with key @key and value @value. The function
 * returns -EBUSY if an entry with the same key and value already exists
 * in the cache, and 0 on success.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One reference for the hash table */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab a reference for the LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
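/*
 * Usage sketch (illustrative, not part of this file): how a filesystem might
 * cache an extended attribute block it has just written. The names
 * ea_block_cache, hash and block_nr are hypothetical; GFP_NOFS is the usual
 * allocation mask in filesystem context.
 *
 *	err = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash,
 *				    block_nr, true);
 *	if (err == -EBUSY)
 *		err = 0;	(the key-value pair is already cached)
 *	else if (err)
 *		return err;	(typically -ENOMEM)
 */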
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;

out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}
/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs reference to the
 * first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);
/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops reference to @entry and returns with a reference to the
 * found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
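/*
 * Usage sketch (illustrative): walking all reusable entries with a given key,
 * e.g. all cached blocks whose contents hash to the same value. Each call to
 * mb_cache_entry_find_next() hands back the reference to the previous entry;
 * only when stopping early must the caller drop the reference itself.
 * try_to_reuse_block() is a hypothetical caller-side helper.
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_find_first(cache, hash);
 *	while (ce) {
 *		if (try_to_reuse_block(ce->e_value)) {
 *			mb_cache_entry_put(cache, ce);
 *			break;
 *		}
 *		ce = mb_cache_entry_find_next(cache, ce);
 *	}
 */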
/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache: cache we work with
 * @key: key
 * @value: value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);
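/*
 * Note the contrast with __entry_find() above: mb_cache_entry_get() matches
 * the exact key-value pair and ignores e_reusable, so it can also fetch
 * entries that are not offered for reuse. Illustrative use:
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_get(cache, hash, block_nr);
 *	if (ce) {
 *		... use ce ...
 *		mb_cache_entry_put(cache, ce);
 *	}
 */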
/* mb_cache_entry_delete - remove a cache entry
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Remove entry from cache @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);
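/*
 * Usage sketch (illustrative): a filesystem dropping the cache entry for an
 * xattr block that is being freed, so later lookups cannot hand out a stale
 * block number. The names are hypothetical:
 *
 *	mb_cache_entry_delete(ea_block_cache, hash, (u64)freed_block_nr);
 *
 * The key-value pair uniquely identifies the entry, which is why both must
 * be passed.
 */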
/* mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks entry as used to give it a higher chance of surviving in the cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);
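/*
 * Illustrative call site: after a successful lookup confirms the cached
 * block is still usable, touching the entry sets e_referenced so the
 * shrinker gives it a second pass around the LRU list:
 *
 *	mb_cache_entry_touch(cache, ce);
 *	mb_cache_entry_put(cache, ce);
 */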
static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}
/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		/* Referenced entries get a second pass around the LRU list */
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}
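/*
 * Worked example (illustrative) of the second-chance behaviour above: with
 * LRU list A -> B and A referenced, scanning two entries first clears A's
 * referenced bit and rotates it to the tail (list becomes B -> A), then
 * reclaims B. A is only reclaimed by a later scan if it is not touched again
 * in the meantime.
 */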
static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return mb_cache_shrink(cache, sc->nr_to_scan);
}
/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);

	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}
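/*
 * Worked numbers (illustrative): for a cache created with bucket_bits = 10,
 * c_max_entries = (1 << 10) << 4 = 16384, so each worker run reclaims up to
 * 16384 / SHRINK_DIVISOR = 1024 entries, and mb_cache_entry_create() falls
 * back to synchronous reclaim once the count reaches 2 * 16384 = 32768.
 */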
/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
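/*
 * Lifecycle sketch (illustrative): a filesystem typically creates one cache
 * at mount time and destroys it at unmount, once no entry can be reached.
 * sbi->s_ea_block_cache is a hypothetical per-superblock field; ext4 keeps a
 * similar pointer.
 *
 *	sbi->s_ea_block_cache = mb_cache_create(10);	(1024 buckets)
 *	if (!sbi->s_ea_block_cache)
 *		return -ENOMEM;
 *	...
 *	mb_cache_destroy(sbi->s_ea_block_cache);
 */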
/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);
static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");