/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba() system_has_cmpxchg128()
# define try_cmpxchg_freelist try_cmpxchg128
# endif
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba() system_has_cmpxchg64()
# define try_cmpxchg_freelist try_cmpxchg64
# endif
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoiding the typical ABA
 * problems with cmpxchg of just a pointer.
 */
struct freelist_counters {
	union {
		struct {
			void *freelist;
			union {
				unsigned long counters;
				struct {
					unsigned inuse:16;
					unsigned objects:15;
					/*
					 * If slab debugging is enabled then
					 * the frozen bit can be reused to
					 * indicate that the slab was
					 * corrupted.
					 */
					unsigned frozen:1;
#ifdef CONFIG_64BIT
					/*
					 * Some optimizations use free bits in
					 * the 'counters' field to save memory.
					 * If the ->stride field is not
					 * available, such optimizations are
					 * disabled.
					 */
					unsigned int stride;
#endif
				};
			};
		};
#ifdef system_has_freelist_aba
		freelist_full_t freelist_counters;
#endif
	};
};
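
/*
 * Illustrative sketch of the double-word update this enables (a hedged
 * approximation, not the exact code in mm/slub.c): the pointer and the
 * counters are swapped in a single atomic operation, so a concurrent
 * free/alloc cycle that restores the same freelist pointer still fails
 * the cmpxchg because the counters have changed.
 *
 *	struct freelist_counters old, new;
 *
 *	old.freelist = slab->freelist;
 *	old.counters = slab->counters;
 *	new.freelist = next_object;
 *	new.counters = old.counters;
 *	new.inuse++;
 *	if (!try_cmpxchg_freelist(&slab->freelist_counters,
 *				  &old.freelist_counters,
 *				  new.freelist_counters))
 *		goto retry;
 */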

/* Reuses the bits in struct page */
struct slab {
	memdesc_flags_t flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			/* Double-word boundary */
			struct freelist_counters;
		};
		struct rcu_head rcu_head;
	};

	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
	unsigned long obj_exts;
#endif
};

#define SLAB_MATCH(pg, sl) \
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, flags);
SLAB_MATCH(compound_info, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
#endif

/**
 * slab_folio - The folio allocated for a slab
 * @s: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects, and use
 * some fields in the first struct page of the folio; those fields are now
 * accessed through struct slab. It is occasionally necessary to convert back
 * to a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s) (_Generic((s), \
	const struct slab *: (const struct folio *)s, \
	struct slab *: (struct folio *)s))
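
/*
 * Example usage (an illustrative sketch, not lifted from any caller):
 *
 *	struct folio *folio = slab_folio(slab);
 *
 *	pr_debug("slab occupies %zu bytes\n", folio_size(folio));
 */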

/**
 * page_slab - Converts from struct page to its slab.
 * @page: A page which may or may not belong to a slab.
 *
 * Return: The slab which contains this page, or NULL if the page does not
 * belong to a slab; this includes pages returned from large kmalloc.
 */
static inline struct slab *page_slab(const struct page *page)
{
	page = compound_head(page);
	if (data_race(page->page_type >> 24) != PGTY_slab)
		page = NULL;

	return (struct slab *)page;
}

/**
 * slab_page - The first struct page allocated for a slab
 * @s: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return memdesc_nid(slab->flags);
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return NODE_DATA(slab_nid(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	return page_slab(virt_to_page(addr));
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

/*
 * A word-sized structure that can be atomically updated or read, containing
 * both the order and the number of objects that a slab of the given order
 * would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
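
/*
 * The encode/decode helpers live in mm/slub.c; as a sketch (assuming the
 * order sits in the bits above OO_SHIFT and the object count in the bits
 * below it):
 *
 *	order   = oo.x >> OO_SHIFT;
 *	objects = oo.x & OO_MASK;
 */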

struct kmem_cache_per_node_ptrs {
	struct node_barn *barn;
	struct kmem_cache_node *node;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct slub_percpu_sheaves __percpu *cpu_sheaves;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
	unsigned int sheaf_capacity;
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

#ifdef CONFIG_SLUB_STATS
	struct kmem_cache_stats __percpu *cpu_stats;
#endif

	struct kmem_cache_per_node_ptrs per_node[MAX_NUMNODES];
};

/*
 * Every cache has a non-NULL s->cpu_sheaves, but it may point to the
 * bootstrap_sheaf temporarily during init, or permanently for the boot caches
 * and caches with debugging enabled, or for all caches with CONFIG_SLUB_TINY.
 * This helper distinguishes whether the cache has real, non-bootstrap sheaves.
 */
static inline bool cache_has_sheaves(struct kmem_cache *s)
{
	/* Test CONFIG_SLUB_TINY for code elimination purposes */
	return !IS_ENABLED(CONFIG_SLUB_TINY) && s->sheaf_capacity;
}

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
int sysfs_slab_alias(struct kmem_cache *s, const char *name);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{ return 0; }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
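
/*
 * Worked example (hedged, with made-up numbers): for a slab starting at
 * 0x1000 and cache->size == 64, a pointer x == 0x1090 lies 0x90 bytes into
 * the slab, so the rounding above yields 0x1090 - (0x90 % 64) == 0x1080,
 * the start of the object containing x.
 */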

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, const void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, const void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
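
/*
 * An illustrative note: reciprocal_divide() computes
 * (obj - slab_address(slab)) / cache->size without a hardware division,
 * using the precomputed cache->reciprocal_size. E.g. with an object size
 * of 64 bytes, an object at byte offset 192 gets index 192 / 64 == 3.
 */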

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,		/* No slab functionality yet */
	PARTIAL,	/* SLUB: kmem_cache_node available */
	UP,		/* Slab caches usable but not all extras yet */
	FULL		/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (!b)
		b = &kmalloc_caches[kmalloc_type(flags, caller)];
	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return (*b)[index];
}
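
/*
 * Worked example (hedged): a request for 100 bytes takes the table path,
 * kmalloc_size_index[(100 - 1) / 8] == kmalloc_size_index[12], which maps
 * to the kmalloc-128 cache; a request for 200 bytes takes the fls() path,
 * fls(199) == 8, i.e. the kmalloc-256 cache.
 */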

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
			 unsigned int size, struct kmem_cache_args *args,
			 slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			      unsigned int size, slab_flags_t flags,
			      unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
bool slab_args_unmergeable(struct kmem_cache_args *args, slab_flags_t flags);

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

static inline bool is_kmalloc_normal(struct kmem_cache *s)
{
	if (!is_kmalloc_cache(s))
		return false;
	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}

bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
void flush_all_rcu_sheaves(void);
void flush_rcu_sheaves_on_cache(struct kmem_cache *s);

#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
			 SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			 SLAB_TEMPORARY | SLAB_ACCOUNT | \
			 SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)

#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug(), as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
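
/*
 * Typical usage (a hedged sketch in the style of mm/slub.c callers):
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		set_track(s, object, TRACK_ALLOC, addr);
 */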

#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
bool slab_in_kunit_test(void);
#else
static inline bool slab_in_kunit_test(void) { return false; }
#endif

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
	kmsan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kmsan_enable_current();
	kasan_enable_current();
}
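
/*
 * The pair is meant to bracket the raw metadata access; a hedged sketch:
 *
 *	metadata_access_enable();
 *	... read or write redzone/tracking bytes outside the object ...
 *	metadata_access_disable();
 */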

#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns the address of the object extension vector associated with the slab,
 * or zero if no such vector has been associated yet.
 * Do not dereference the return value directly; use the
 * get/put_slab_obj_exts() pair and slab_obj_ext() to access individual
 * elements.
 *
 * Example usage:
 *
 *	obj_exts = slab_obj_exts(slab);
 *	if (obj_exts) {
 *		get_slab_obj_exts(obj_exts);
 *		obj_ext = slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, obj));
 *		// do something with obj_ext
 *		put_slab_obj_exts(obj_exts);
 *	}
 *
 * Note that the get/put semantics do not involve reference counting.
 * Instead, they update the kasan/kmsan depth so that accesses to slabobj_ext
 * won't be reported as access violations.
 */
static inline unsigned long slab_obj_exts(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	/*
	 * obj_exts should be either NULL, a valid pointer with the
	 * MEMCG_DATA_OBJEXTS bit set, or equal to OBJEXTS_ALLOC_FAIL.
	 */
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS) &&
		       obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif

	return obj_exts & ~OBJEXTS_FLAGS_MASK;
}

static inline void get_slab_obj_exts(unsigned long obj_exts)
{
	VM_WARN_ON_ONCE(!obj_exts);
	metadata_access_enable();
}

static inline void put_slab_obj_exts(unsigned long obj_exts)
{
	metadata_access_disable();
}

#ifdef CONFIG_64BIT
static inline void slab_set_stride(struct slab *slab, unsigned int stride)
{
	slab->stride = stride;
}
static inline unsigned int slab_get_stride(struct slab *slab)
{
	return slab->stride;
}
#else
static inline void slab_set_stride(struct slab *slab, unsigned int stride)
{
	VM_WARN_ON_ONCE(stride != sizeof(struct slabobj_ext));
}
static inline unsigned int slab_get_stride(struct slab *slab)
{
	return sizeof(struct slabobj_ext);
}
#endif

/*
 * slab_obj_ext - get the pointer to the slab object extension metadata
 * associated with an object in a slab.
 * @slab: a pointer to the slab struct
 * @obj_exts: a pointer to the object extension vector
 * @index: an index of the object
 *
 * Returns a pointer to the object extension associated with the object.
 * Must be called within a section covered by get/put_slab_obj_exts().
 */
static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
					       unsigned long obj_exts,
					       unsigned int index)
{
	struct slabobj_ext *obj_ext;

	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));

	obj_ext = (struct slabobj_ext *)(obj_exts +
					 slab_get_stride(slab) * index);
	return kasan_reset_tag(obj_ext);
}

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
			gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline unsigned long slab_obj_exts(struct slab *slab)
{
	return 0;
}

static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
					       unsigned long obj_exts,
					       unsigned int index)
{
	return NULL;
}

static inline void slab_set_stride(struct slab *slab, unsigned int stride) { }
static inline unsigned int slab_get_stride(struct slab *slab) { return 0; }

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, unsigned long obj_exts);
#endif

void kvfree_rcu_cb(struct rcu_head *head);

static inline unsigned int large_kmalloc_order(const struct page *page)
{
	return page[1].flags.f & 0xff;
}

static inline size_t large_kmalloc_size(const struct page *page)
{
	return PAGE_SIZE << large_kmalloc_order(page);
}
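
/*
 * Worked example (hedged): a large kmalloc stores its order in the low
 * byte of the second page's flags, so order 3 with 4KiB pages gives
 * large_kmalloc_size() == 4096 << 3 == 32KiB.
 */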

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					  unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}
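
/*
 * In other words (a hedged summary of the logic above): with init_on_alloc
 * enabled, a plain cache is zeroed on every allocation; a cache with a
 * constructor is never auto-zeroed, as that would clobber the constructed
 * state; and SLAB_TYPESAFE_BY_RCU/SLAB_POISON caches are zeroed only when
 * the caller explicitly passes __GFP_ZERO.
 */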

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

void defer_free_barrier(void);

static inline bool slub_debug_orig_size(struct kmem_cache *s)
{
	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
		(s->flags & SLAB_KMALLOC));
}

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */