/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
struct freelist_counters {
	union {
		struct {
			void *freelist;
			union {
				unsigned long counters;
				struct {
					unsigned inuse:16;
					unsigned objects:15;
					/*
					 * If slab debugging is enabled then the
					 * frozen bit can be reused to indicate
					 * that the slab was corrupted
					 */
					unsigned frozen:1;
#ifdef CONFIG_64BIT
					/*
					 * Some optimizations use free bits in the
					 * 'counters' field to save memory. When the
					 * ->stride field is not available, such
					 * optimizations are disabled.
					 */
					unsigned int stride;
#endif
				};
			};
		};
#ifdef system_has_freelist_aba
		freelist_full_t freelist_counters;
#endif
	};
};

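/*
 * Illustrative sketch only (the real update logic lives in mm/slub.c): when
 * system_has_freelist_aba() is available, the freelist head and the counters
 * word can be swapped in a single double-word cmpxchg, so a concurrent
 * free/alloc cycle cannot silently reinstall a stale freelist pointer.
 * Roughly, with hypothetical new_head/new_counters values:
 *
 *	struct freelist_counters old, new;
 *
 *	do {
 *		old.freelist = READ_ONCE(slab->freelist);
 *		old.counters = READ_ONCE(slab->counters);
 *		new.freelist = new_head;
 *		new.counters = new_counters;
 *	} while (!try_cmpxchg_freelist(&slab->freelist_counters,
 *				       &old.freelist_counters,
 *				       new.freelist_counters));
 */
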
/* Reuses the bits in struct page */
struct slab {
	memdesc_flags_t flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			/* Double-word boundary */
			struct freelist_counters;
		};
		struct rcu_head rcu_head;
	};

	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
	unsigned long obj_exts;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
#endif

/**
 * slab_folio - The folio allocated for a slab
 * @s: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to a
 * folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

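/*
 * Example (illustrative): code that needs folio-level information can write
 * e.g. folio_nr_pages(slab_folio(slab)) instead of open-coding the cast.
 */
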
/**
 * page_slab - Converts from struct page to its slab.
 * @page: A page which may or may not belong to a slab.
 *
 * Return: The slab which contains this page, or NULL if the page does not
 * belong to a slab (this includes pages returned from large kmalloc).
 */
static inline struct slab *page_slab(const struct page *page)
{
	unsigned long head;

	/* Bit 0 of compound_head marks a tail page; follow it to the head. */
	head = READ_ONCE(page->compound_head);
	if (head & 1)
		page = (struct page *)(head - 1);
	/* Only pages typed as slab can be converted; everything else is NULL. */
	if (data_race(page->page_type >> 24) != PGTY_slab)
		page = NULL;

	return (struct slab *)page;
}

/**
 * slab_page - The first struct page allocated for a slab
 * @s: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return memdesc_nid(slab->flags);
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return NODE_DATA(slab_nid(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	return page_slab(virt_to_page(addr));
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

struct kmem_cache_per_node_ptrs {
	struct node_barn *barn;
	struct kmem_cache_node *node;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct slub_percpu_sheaves __percpu *cpu_sheaves;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
	unsigned int sheaf_capacity;
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

#ifdef CONFIG_SLUB_STATS
	struct kmem_cache_stats __percpu *cpu_stats;
#endif

	struct kmem_cache_per_node_ptrs per_node[MAX_NUMNODES];
};

/*
 * Every cache has a non-NULL s->cpu_sheaves, but it may point to the
 * bootstrap_sheaf temporarily during init, or permanently for the boot caches
 * and caches with debugging enabled, or for all caches with CONFIG_SLUB_TINY.
 * This helper distinguishes whether the cache has real, non-bootstrap sheaves.
 */
static inline bool cache_has_sheaves(struct kmem_cache *s)
{
	/* Test CONFIG_SLUB_TINY for code elimination purposes */
	return !IS_ENABLED(CONFIG_SLUB_TINY) && s->sheaf_capacity;
}

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
int sysfs_slab_alias(struct kmem_cache *s, const char *name);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *name)
							{ return 0; }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

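/*
 * Find the start of the object that contains the address x: round x down to
 * an object boundary, clamp to the last object of the slab, and skip the
 * left redzone when one is present.
 */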
static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, const void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, const void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}

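/*
 * Worked example (illustrative): for a cache with s->size == 256, an object
 * at slab_address(slab) + 512 maps to index 2; the division uses the
 * precomputed reciprocal_size to avoid a runtime divide. KFENCE objects do
 * not live in the slab's object array and always report index 0.
 */
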
static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (!b)
		b = &kmalloc_caches[kmalloc_type(flags, caller)];
	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return (*b)[index];
}

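/*
 * Worked example (illustrative): a request for 100 bytes goes through the
 * kmalloc_size_index[] table (size_index_elem(100) == 12) and typically
 * resolves to the kmalloc-128 cache, while a request for 1000 bytes uses
 * fls(999) == 10, i.e. the kmalloc-1024 cache.
 */
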
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
			 unsigned int size, struct kmem_cache_args *args,
			 slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags);

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

static inline bool is_kmalloc_normal(struct kmem_cache *s)
{
	if (!is_kmalloc_cache(s))
		return false;
	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}

bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
void flush_all_rcu_sheaves(void);
void flush_rcu_sheaves_on_cache(struct kmem_cache *s);

#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
			 SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			 SLAB_TEMPORARY | SLAB_ACCOUNT | \
			 SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)

#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

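/*
 * Example (illustrative): kmem_cache_debug_flags(s, SLAB_STORE_USER) reports
 * whether alloc/free tracking is active for this cache; the static-branch
 * test runs first so the common non-debug case stays cheap.
 */
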
#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
bool slab_in_kunit_test(void);
#else
static inline bool slab_in_kunit_test(void) { return false; }
#endif

/*
 * slub is about to manipulate internal object metadata.  This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
	kmsan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kmsan_enable_current();
	kasan_enable_current();
}

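/*
 * Typical usage (illustrative sketch; the metadata offset and local variable
 * are assumptions, not real slub code):
 *
 *	metadata_access_enable();
 *	memcpy(&track, object + s->inuse, sizeof(track));
 *	metadata_access_disable();
 *
 * i.e. bracket any read or write of redzone/poison/tracking bytes so KASAN
 * and KMSAN do not flag the out-of-object access.
 */
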
#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns the address of the object extension vector associated with the slab,
 * or zero if no such vector has been associated yet.
 * Do not dereference the return value directly; use the get/put_slab_obj_exts()
 * pair and slab_obj_ext() to access individual elements.
 *
 * Example usage:
 *
 * obj_exts = slab_obj_exts(slab);
 * if (obj_exts) {
 *         get_slab_obj_exts(obj_exts);
 *         obj_ext = slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, obj));
 *         // do something with obj_ext
 *         put_slab_obj_exts(obj_exts);
 * }
 *
 * Note that the get/put semantics do not involve reference counting.
 * Instead, they update the kasan/kmsan depth so that accesses to slabobj_ext
 * won't be reported as access violations.
 */
static inline unsigned long slab_obj_exts(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	/*
	 * obj_exts should be either NULL, a valid pointer with
	 * MEMCG_DATA_OBJEXTS bit set or be equal to OBJEXTS_ALLOC_FAIL.
	 */
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS) &&
		       obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif

	return obj_exts & ~OBJEXTS_FLAGS_MASK;
}

static inline void get_slab_obj_exts(unsigned long obj_exts)
{
	VM_WARN_ON_ONCE(!obj_exts);
	metadata_access_enable();
}

static inline void put_slab_obj_exts(unsigned long obj_exts)
{
	metadata_access_disable();
}

#ifdef CONFIG_64BIT
static inline void slab_set_stride(struct slab *slab, unsigned int stride)
{
	slab->stride = stride;
}
static inline unsigned int slab_get_stride(struct slab *slab)
{
	return slab->stride;
}
#else
static inline void slab_set_stride(struct slab *slab, unsigned int stride)
{
	VM_WARN_ON_ONCE(stride != sizeof(struct slabobj_ext));
}
static inline unsigned int slab_get_stride(struct slab *slab)
{
	return sizeof(struct slabobj_ext);
}
#endif

/*
 * slab_obj_ext - get the pointer to the slab object extension metadata
 * associated with an object in a slab.
 * @slab: a pointer to the slab struct
 * @obj_exts: a pointer to the object extension vector
 * @index: an index of the object
 *
 * Returns a pointer to the object extension associated with the object.
 * Must be called within a section covered by get/put_slab_obj_exts().
 */
static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
					       unsigned long obj_exts,
					       unsigned int index)
{
	struct slabobj_ext *obj_ext;

	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));

	obj_ext = (struct slabobj_ext *)(obj_exts +
					 slab_get_stride(slab) * index);
	return kasan_reset_tag(obj_ext);
}

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
			gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline unsigned long slab_obj_exts(struct slab *slab)
{
	return 0;
}

static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
					       unsigned long obj_exts,
					       unsigned int index)
{
	return NULL;
}

static inline void slab_set_stride(struct slab *slab, unsigned int stride) { }
static inline unsigned int slab_get_stride(struct slab *slab) { return 0; }

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, unsigned long obj_exts);
#endif

void kvfree_rcu_cb(struct rcu_head *head);

static inline unsigned int large_kmalloc_order(const struct page *page)
{
	return page[1].flags.f & 0xff;
}

static inline size_t large_kmalloc_size(const struct page *page)
{
	return PAGE_SIZE << large_kmalloc_order(page);
}

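/*
 * Example (illustrative): the order of a large kmalloc allocation is kept in
 * the low byte of the second page's flags, so with 4K pages an order-3
 * allocation reports large_kmalloc_size() == 32768.
 */
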
#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

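/*
 * Example (illustrative): with init_on_alloc enabled, a plain kmalloc()
 * returns zeroed memory, but caches with a constructor are skipped so the
 * constructed contents are not wiped, and SLAB_TYPESAFE_BY_RCU or poisoned
 * caches only zero on an explicit __GFP_ZERO.
 */
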
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

void defer_free_barrier(void);

static inline bool slub_debug_orig_size(struct kmem_cache *s)
{
	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
			(s->flags & SLAB_KMALLOC));
}

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */