/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled; otherwise they are no-ops.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * begin:
 *  rcu_read_lock();
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) { // might fail for free objects
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above.  The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor().  Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/*
 * Prevent merging with compatible kmem caches. This flag should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance critical caches, which should be very rare and only set
 *   after consulting the slab maintainers, and not used together with
 *   CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		((slab_flags_t __force)0x01000000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
#else
#define SLAB_SKIP_KFENCE	0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#else
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
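
/*
 * Illustrative sketch (not part of the API): a zero-length request is legal;
 * it returns ZERO_SIZE_PTR rather than NULL, and kfree() accepts it:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// p == ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))
 *		;				// do not dereference p
 *	kfree(p);				// no-op, same as kfree(NULL)
 */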

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
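
/*
 * Example (illustrative only; "struct foo" and foo_cachep are hypothetical
 * names, not part of this API):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *	// equivalent to kmem_cache_create("foo", sizeof(struct foo),
 *	//	__alignof__(struct foo), SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL)
 */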

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
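
/*
 * Example (illustrative; "struct foo" with a "data" field is hypothetical).
 * Only the whitelisted field may be copied to/from userspace when hardened
 * usercopy checks are enabled:
 *
 *	foo_cachep = KMEM_CACHE_USERCOPY(foo, SLAB_ACCOUNT, data);
 */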

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
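
/*
 * With the scope-based cleanup helpers from <linux/cleanup.h>, a kmalloc'ed
 * buffer can be released automatically when it goes out of scope. A minimal
 * sketch (the buffer name and size are hypothetical):
 *
 *	u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// use buf; kfree(buf) runs automatically on every return path
 */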

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);
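
/*
 * Illustrative sketch: to make use of the slack space in a kmalloc bucket,
 * round the request up before allocating rather than querying ksize()
 * afterwards (which would trip UBSAN_BOUNDS/FORTIFY_SOURCE on access):
 *
 *	size_t want = kmalloc_size_roundup(count);	// "count" is hypothetical
 *	void *buf = kmalloc(want, GFP_KERNEL);
 *
 *	// on success, all "want" bytes of buf may be used
 */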

#ifdef CONFIG_PRINTK
bool kmem_dump_obj(void *object);
#else
static inline bool kmem_dump_obj(void *object) { return false; }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * The page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using a byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR	0
#endif

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

extern unsigned long random_kmalloc_seed;

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
		return KMALLOC_NORMAL;
#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
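
/*
 * For example (assuming CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM are enabled
 * and CONFIG_RANDOM_KMALLOC_CACHES is not):
 *
 *	kmalloc_type(GFP_KERNEL, _RET_IP_)			-> KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT, _RET_IP_)	-> KMALLOC_CGROUP
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE, _RET_IP_)	-> KMALLOC_RECLAIM
 *	kmalloc_type(GFP_DMA, _RET_IP_)				-> KMALLOC_DMA
 */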

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
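
/*
 * For example, with KMALLOC_MIN_SIZE == 8 (so KMALLOC_SHIFT_LOW == 3):
 *
 *	kmalloc_index(8)   == 3		// the   8-byte cache
 *	kmalloc_index(24)  == 5		// the  32-byte cache
 *	kmalloc_index(96)  == 1		// the  96-byte cache
 *	kmalloc_index(200) == 8		// the 256-byte cache
 */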

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);
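
/*
 * Typical usage (illustrative; foo_cachep is a hypothetical cache created
 * earlier, e.g. with KMEM_CACHE()):
 *
 *	struct foo *obj = kmem_cache_zalloc(foo_cachep, GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 */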

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
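
/*
 * A minimal sketch of the bulk API (foo_cachep and the array size are
 * hypothetical). kmem_cache_alloc_bulk() returns the number of objects
 * allocated, or 0 on failure:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cachep, ARRAY_SIZE(objs), objs);
 */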

void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;

void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
		    __assume_kmalloc_alignment __alloc_size(3);

void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size) __assume_kmalloc_alignment
						__alloc_size(4);
void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
					      __alloc_size(1);

void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
							     __alloc_size(1);

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size a power of two, the alignment is also guaranteed to be
 * at least @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but eventually give up
 *	and fail.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		index = kmalloc_index(size);
		return kmalloc_trace(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc(size, flags);
}
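
/*
 * Typical usage (illustrative; "struct foo" is hypothetical):
 *
 *	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kfree(p);
 */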

static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large_node(size, flags, node);

		index = kmalloc_index(size);
		return kmalloc_node_trace(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
								      size_t new_n,
								      size_t new_size,
								      gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}
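
/*
 * Illustrative sketch (growing a previously allocated array "tbl" to new_n
 * entries; the names are hypothetical):
 *
 *	struct foo *tmp = krealloc_array(tbl, new_n, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;	// tbl is still valid and unchanged
 *	tbl = tmp;
 */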

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
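
/*
 * For example, an overflow-checked, zeroed allocation of n array elements
 * (illustrative; n and "struct foo" are hypothetical):
 *
 *	struct foo *arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
 */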

void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
				  unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
				    _RET_IP_)

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(size, flags) \
	__kmalloc_node_track_caller(size, flags, \
				    NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __realloc_size(3);
extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
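
/*
 * Illustrative sketch: kvmalloc() and friends fall back to vmalloc() when a
 * physically contiguous allocation is not possible, so the result must be
 * released with kvfree(), never plain kfree():
 *
 *	void *buf = kvzalloc(size, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 */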

extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */