xref: /linux/tools/include/linux/slab.h (revision b4f0dd314b39ea154f62f3bd3115ed0470f9f71e)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _TOOLS_SLAB_H
3 #define _TOOLS_SLAB_H
4 
5 #include <linux/types.h>
6 #include <linux/gfp.h>
7 #include <pthread.h>
8 
/* Cache flag: objects can be reclaimed under memory pressure. */
#define SLAB_RECLAIM_ACCOUNT    0x00020000UL            /* Objects are reclaimable */

/*
 * kzalloc_node() must return zeroed memory, like kzalloc(); the NUMA
 * node hint is meaningless in this userspace harness and is dropped.
 * Mapping it to plain kmalloc() (as before) handed callers
 * uninitialized memory despite the "z" in the name.
 */
#define kzalloc_node(size, flags, node) kzalloc(size, flags)
/*
 * Internal bit numbers for the cache-creation flags below; only the
 * few flags the userspace test harness models are listed.
 */
enum _slab_flag_bits {
	_SLAB_KMALLOC,
	_SLAB_HWCACHE_ALIGN,
	_SLAB_PANIC,
	_SLAB_TYPESAFE_BY_RCU,
	_SLAB_ACCOUNT,
	_SLAB_FLAGS_LAST_BIT	/* keep last: count of flag bits */
};

/* Turn a bit number into a flag value; __SLAB_FLAG_UNUSED compiles a flag out. */
#define __SLAB_FLAG_BIT(nr)	((unsigned int __force)(1U << (nr)))
#define __SLAB_FLAG_UNUSED	((unsigned int __force)(0U))

#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)	/* align objects to cache lines */
#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)		/* panic if cache creation fails */
#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)	/* defer slab page reuse via RCU */
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED	/* no-op without CONFIG_MEMCG */
#endif

32 
/* Userspace implementations of the basic kernel allocation entry points. */
void *kmalloc(size_t size, gfp_t gfp);
void kfree(void *p);
void *kmalloc_array(size_t n, size_t size, gfp_t gfp);

/* Returns true once the (emulated) slab allocator is usable. */
bool slab_is_available(void);

/* Bring-up state of the allocator, mirroring the kernel's slab_state. */
enum slab_state {
	DOWN,		/* nothing available yet */
	PARTIAL,	/* partially initialised */
	UP,		/* caches usable */
	FULL		/* fully operational */
};

45 
/*
 * Userspace stand-in for the kernel's struct kmem_cache, backed by the
 * shared test allocator.  NOTE(review): per-field comments are inferred
 * from names and this header's usage — confirm against the allocator
 * implementation (tools/testing shared slab code).
 */
struct kmem_cache {
	pthread_mutex_t lock;		/* serialises cache state updates */
	unsigned int size;		/* object size in bytes */
	unsigned int align;		/* requested object alignment */
	unsigned int sheaf_capacity;	/* capacity used for sheaves, see below */
	int nr_objs;			/* presumably objects cached on a free list */
	void *objs;			/* cached-object list head — verify */
	void (*ctor)(void *);		/* optional per-object constructor */
	bool non_kernel_enabled;	/* enables the non_kernel budget below */
	unsigned int non_kernel;	/* test-only allocation budget — verify */
	unsigned long nr_allocated;	/* allocation counter for test assertions */
	unsigned long nr_tallocated;	/* second counter; semantics differ — verify */
	bool exec_callback;		/* when set, invoke @callback */
	void (*callback)(void *);	/* test hook */
	void *private;			/* opaque data, presumably for @callback */
};

62 
/**
 * struct kmem_cache_args - optional arguments for cache creation
 *
 * Passed to __kmem_cache_create_args(); zero-initialise any field whose
 * default behaviour is wanted.
 */
struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @sheaf_capacity: The maximum size of the sheaf.
	 */
	unsigned int sheaf_capacity;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset, when @usersize is non-%0
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free object in the
	 * same state as after calling the constructor, or deal appropriately
	 * with any differences between a freshly constructed and a reallocated
	 * object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
};

125 
/*
 * A sheaf: a batch of objects pre-filled from a cache (see
 * kmem_cache_prefill_sheaf() and friends below).
 */
struct slab_sheaf {
	union {
		struct list_head barn_list;	/* linkage while stored away */
		/* only used for prefilled sheafs */
		unsigned int capacity;
	};
	struct kmem_cache *cache;	/* owning cache */
	unsigned int size;		/* objects currently held */
	int node; /* only used for rcu_sheaf */
	void *objects[];		/* flexible array of object pointers */
};

137 
/* Allocate @size bytes of zeroed memory (kmalloc with __GFP_ZERO). */
static inline void *kzalloc(size_t size, gfp_t gfp)
{
	gfp_t zeroing_gfp = gfp | __GFP_ZERO;

	return kmalloc(size, zeroing_gfp);
}
142 
/* Opaque here; only passed through to the allocator. */
struct list_lru;

/* Allocate one object from @cachep, optionally tracked on a list_lru. */
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *, int flags);
/* Allocate one object from @cachep with no LRU tracking. */
static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
	struct list_lru *no_lru = NULL;

	return kmem_cache_alloc_lru(cachep, no_lru, flags);
}
/* Return @objp to @cachep. */
void kmem_cache_free(struct kmem_cache *cachep, void *objp);


/* Create a cache of @size-byte objects; @args customises it and must not
 * be NULL here — use the kmem_cache_create() front end for NULL. */
struct kmem_cache *
__kmem_cache_create_args(const char *name, unsigned int size,
		struct kmem_cache_args *args, unsigned int flags);

157 /* If NULL is passed for @args, use this variant with default arguments. */
158 static inline struct kmem_cache *
__kmem_cache_default_args(const char * name,unsigned int size,struct kmem_cache_args * args,unsigned int flags)159 __kmem_cache_default_args(const char *name, unsigned int size,
160 		struct kmem_cache_args *args, unsigned int flags)
161 {
162 	struct kmem_cache_args kmem_default_args = {};
163 
164 	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
165 }
166 
167 static inline struct kmem_cache *
__kmem_cache_create(const char * name,unsigned int size,unsigned int align,unsigned int flags,void (* ctor)(void *))168 __kmem_cache_create(const char *name, unsigned int size, unsigned int align,
169 		unsigned int flags, void (*ctor)(void *))
170 {
171 	struct kmem_cache_args kmem_args = {
172 		.align	= align,
173 		.ctor	= ctor,
174 	};
175 
176 	return __kmem_cache_create_args(name, size, &kmem_args, flags);
177 }
178 
/*
 * kmem_cache_create() - dispatch on the type of the third argument:
 * a struct kmem_cache_args * selects the args-based variant, a bare
 * NULL (type void *) the all-defaults variant, and anything else the
 * legacy align/flags/ctor signature.
 */
#define kmem_cache_create(__name, __object_size, __args, ...)           \
	_Generic((__args),                                              \
		struct kmem_cache_args *: __kmem_cache_create_args,	\
		void *: __kmem_cache_default_args,			\
		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)

184 
/* Free @size objects from @list back to @cachep in one call. */
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
/* Allocate @size objects into @list; returns how many were allocated. */
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
			  void **list);
/* Obtain a sheaf pre-filled with at least @size objects from @s. */
struct slab_sheaf *
kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);

/* Take one object out of a prefilled @sheaf. */
void *
kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf *sheaf);

/* Hand @sheaf (and any objects still in it) back to the cache. */
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf *sheaf);
/* Top *@sheafp back up to @size objects; may replace the sheaf. */
int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf **sheafp, unsigned int size);

199 
kmem_cache_sheaf_size(struct slab_sheaf * sheaf)200 static inline unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
201 {
202 	return sheaf->size;
203 }
204 
/*
 * __alloc_objs() - typed allocation helper.  Computes sizeof(TYPE) *
 * COUNT via size_mul() (overflow-checked, see linux/overflow.h) and
 * invokes the given allocator, casting the result to TYPE *.
 */
#define __alloc_objs(KMALLOC, GFP, TYPE, COUNT)				\
({									\
	const size_t __obj_size = size_mul(sizeof(TYPE), COUNT);	\
	(TYPE *)KMALLOC(__obj_size, GFP);				\
})

/* Allocate one zeroed object of P's type; gfp defaults via default_gfp(). */
#define kzalloc_obj(P, ...) \
	__alloc_objs(kzalloc, default_gfp(__VA_ARGS__), typeof(P), 1)

213 
214 #endif		/* _TOOLS_SLAB_H */
215