#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/io_uring_types.h>
#include <linux/kasan.h>

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

void io_alloc_cache_free(struct io_alloc_cache *cache,
			 void (*free)(const void *));
bool io_alloc_cache_init(struct io_alloc_cache *cache,
			 unsigned max_nr, unsigned int size,
			 unsigned int init_bytes);

void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
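/*
 * Try to hand @entry back to the cache. Returns true if the entry was
 * stashed; false if the cache is full or KASAN refuses to poison the
 * object, in which case the caller keeps ownership and must free it.
 */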
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}
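/*
 * Pop the most recently cached entry, or NULL if the cache is empty.
 */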
static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		/*
		 * If KASAN is enabled, always clear the initial bytes that
		 * must be zeroed post alloc, in case any of them overlap
		 * with KASAN storage.
		 */
#if defined(CONFIG_KASAN)
		kasan_mempool_unpoison_object(entry, cache->elem_size);
		if (cache->init_clear)
			memset(entry, 0, cache->init_clear);
#endif
		return entry;
	}

	return NULL;
}
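/*
 * Grab an entry from the cache, falling back to a fresh allocation via
 * io_cache_alloc_new() if the cache is empty.
 */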
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
	void *obj;

	obj = io_alloc_cache_get(cache);
	if (obj)
		return obj;
	return io_cache_alloc_new(cache, gfp);
}
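/*
 * Hand @obj back to the cache; if the cache won't take it, free the
 * object with kfree().
 */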
static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
{
	if (!io_alloc_cache_put(cache, obj))
		kfree(obj);
}
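/*
 * Illustrative usage; a sketch only. 'struct foo' and the surrounding
 * error handling are hypothetical, and io_alloc_cache_init()'s bool
 * return is assumed here to signal allocation failure:
 *
 *	struct io_alloc_cache cache;
 *	struct foo *f;
 *
 *	if (io_alloc_cache_init(&cache, IO_ALLOC_CACHE_MAX, sizeof(*f), 0))
 *		return -ENOMEM;
 *	f = io_cache_alloc(&cache, GFP_KERNEL);
 *	if (f) {
 *		... use f ...
 *		io_cache_free(&cache, f);
 *	}
 *	io_alloc_cache_free(&cache, kfree);
 */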

#endif