/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;
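
/*
 * Illustrative sketch (not part of this header's API): when
 * system_has_freelist_aba() is true, the allocator can swap the freelist head
 * and the counter as a single double-word cmpxchg, so a racing free/alloc
 * that reinstalls the same pointer is still caught by the bumped counter:
 *
 *	freelist_aba_t old = { .freelist = old_head, .counter = old_cnt };
 *	freelist_aba_t new = { .freelist = new_head, .counter = old_cnt + 1 };
 *
 *	if (!try_cmpxchg_freelist(&slab->freelist_counter.full,
 *				  &old.full, new.full))
 *		goto retry;
 *
 * old_head, new_head, old_cnt and the retry label are placeholders for
 * whatever the caller read before attempting the update; the real users of
 * try_cmpxchg_freelist() live in mm/slub.c.
 */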

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations where
 * we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
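
/*
 * Usage sketch (illustrative, the variable names are made up): moving between
 * the three views of the same memory when talking to code that still expects
 * folios or pages:
 *
 *	struct folio *folio = slab_folio(slab);
 *	struct page *page = slab_page(slab);
 *
 *	if (folio_test_slab(folio))
 *		slab = folio_slab(folio);
 *
 * All of these are type-preserving casts (plus folio_page() for slab_page());
 * they take no reference and allocate nothing.
 */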

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
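
/*
 * Sketch of how the packing is used (the actual OO_SHIFT/OO_MASK constants
 * and the oo_order()/oo_objects() helpers live in mm/slub.c):
 *
 *	order   = oo.x >> OO_SHIFT;	high bits: page order of the slab
 *	objects = oo.x & OO_MASK;	low bits: objects per such slab
 *
 * Keeping both halves in one word lets readers see a consistent
 * (order, objects) pair with a plain load, no locking needed.
 */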

/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}
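
/*
 * reciprocal_divide() above is an exact integer division by cache->size, done
 * as a multiply-and-shift with the precomputed cache->reciprocal_size
 * (set up via reciprocal_value(s->size) when the cache is created), i.e.
 * conceptually:
 *
 *	index = (obj - addr) / cache->size;
 *
 * This avoids a hardware divide on every object-to-index lookup.
 */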

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return kmalloc_caches[kmalloc_type(flags, caller)][index];
}
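
/*
 * Worked example: kmalloc(200, GFP_KERNEL) takes the fls() path above;
 * fls(199) == 8, so the allocation is served from the 2^8 = 256 byte cache of
 * whatever type kmalloc_type() selects. Requests of 192 bytes and below go
 * through kmalloc_size_index[] instead, which also knows about the
 * non-power-of-two 96 and 192 byte caches.
 */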

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init kmem_cache_init(void);
void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type,
			      slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_MERGE | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
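
/*
 * Typical use (sketch): guard debug-only work so it costs only a static-key
 * branch when slub_debug is off, e.g.
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		set_track(s, object, TRACK_ALLOC, addr);
 *
 * set_track() stands in for whatever debug action the caller performs;
 * mm/slub.c uses this helper with SLAB_STORE_USER, SLAB_RED_ZONE and similar
 * debug flags.
 */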

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);
#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					   struct kmem_cache *s, gfp_t gfp,
					   bool new_slab)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
#endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
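
/*
 * Summary of the two helpers above: with init_on_alloc enabled, allocations
 * are zeroed unless the cache has a constructor, or is SLAB_TYPESAFE_BY_RCU or
 * SLAB_POISON (for those, only an explicit __GFP_ZERO zeroes). init_on_free
 * likewise skips those cache types, since typesafe-by-RCU objects may still be
 * read after kmem_cache_free() and poisoning already overwrites freed objects.
 */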

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */