/* xref: /linux/mm/slab_common.c (revision 7ebdfaa52d15b947503f76474477f92854796d96) */
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB)

#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	size_t size;

	if (!memcg_kmem_enabled())
		return 0;

	if (!memcg) {
		size = offsetof(struct memcg_cache_params, memcg_caches);
		size += memcg_limited_groups_array_size * sizeof(void *);
	} else
		size = sizeof(struct memcg_cache_params);

	s->memcg_params = kzalloc(size, GFP_KERNEL);
	if (!s->memcg_params)
		return -ENOMEM;

	if (memcg) {
		s->memcg_params->memcg = memcg;
		s->memcg_params->root_cache = root_cache;
	} else
		s->memcg_params->is_root_cache = true;

	return 0;
}

static void memcg_free_cache_params(struct kmem_cache *s)
{
	kfree(s->memcg_params);
}

static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
{
	int size;
	struct memcg_cache_params *new_params, *cur_params;

	BUG_ON(!is_root_cache(s));

	size = offsetof(struct memcg_cache_params, memcg_caches);
	size += num_memcgs * sizeof(void *);

	new_params = kzalloc(size, GFP_KERNEL);
	if (!new_params)
		return -ENOMEM;

	cur_params = s->memcg_params;
	memcpy(new_params->memcg_caches, cur_params->memcg_caches,
	       memcg_limited_groups_array_size * sizeof(void *));

	new_params->is_root_cache = true;

	rcu_assign_pointer(s->memcg_params, new_params);
	if (cur_params)
		kfree_rcu(cur_params, rcu_head);

	return 0;
}
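
/*
 * Illustrative note (not from this file): the resize above follows the
 * classic RCU replace pattern: copy the old memcg_caches array into a
 * larger one, publish it with rcu_assign_pointer(), then reclaim the old
 * array with kfree_rcu() once no reader can still hold a reference.
 * Readers that raced with the switch keep using the old array safely
 * until their RCU read-side critical sections end.
 */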

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;
	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		ret = memcg_update_cache_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			goto out;
	}

	memcg_update_array_size(num_memcgs);
out:
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}
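
/*
 * Illustrative sketch (not from this file; cache names hypothetical):
 * two caches created with compatible parameters may end up sharing one
 * kmem_cache via __kmem_cache_alias()/find_mergeable(). Assuming no
 * constructor and no SLAB_NEVER_MERGE flags,
 *
 *	a = kmem_cache_create("foo_cache", 56, 0, 0, NULL);
 *	b = kmem_cache_create("bar_cache", 56, 0, 0, NULL);
 *
 * can return the same cache for both calls. Booting with "slab_nomerge"
 * (or "slub_nomerge" on SLUB) disables this behaviour.
 */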

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
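
/*
 * Worked example (illustrative): with SLAB_HWCACHE_ALIGN, a 64-byte
 * cache line and a 20-byte object, ralign starts at 64 and is halved
 * while the object fits in half of it: 20 <= 32 gives ralign = 32, and
 * 20 > 16 stops the loop. The result is max(align, 32), raised to
 * ARCH_SLAB_MINALIGN if needed and rounded up to a multiple of
 * sizeof(void *).
 */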

static struct kmem_cache *
do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
		     unsigned long flags, void (*ctor)(void *),
		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = memcg_alloc_cache_params(memcg, s, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	memcg_free_cache_params(s);
	kfree(s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
	char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		s = NULL;	/* suppress uninit var warning */
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = do_kmem_cache_create(cache_name, size, size,
				 calculate_alignment(flags, align, size),
				 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
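
/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 *
 *	struct foo {
 *		int a;
 *		struct list_head list;
 *	};
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *	p = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, p);
 *	kmem_cache_destroy(foo_cachep);
 *
 * Note that failure is reported as NULL (not an ERR_PTR), and that
 * SLAB_PANIC callers rely on the panic() above instead of checking.
 */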

#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 * @memcg_name: The name of the memory cgroup (used for naming the new cache).
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
					   struct kmem_cache *root_cache,
					   const char *memcg_name)
{
	struct kmem_cache *s = NULL;
	char *cache_name;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       memcg_cache_id(memcg), memcg_name);
	if (!cache_name)
		goto out_unlock;

	s = do_kmem_cache_create(cache_name, root_cache->object_size,
				 root_cache->size, root_cache->align,
				 root_cache->flags, root_cache->ctor,
				 memcg, root_cache);
	if (IS_ERR(s)) {
		kfree(cache_name);
		s = NULL;
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	return s;
}

static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	int rc;

	if (!s->memcg_params ||
	    !s->memcg_params->is_root_cache)
		return 0;

	mutex_unlock(&slab_mutex);
	rc = __memcg_cleanup_cache_params(s);
	mutex_lock(&slab_mutex);

	return rc;
}
#else
static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	kfree(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	if (memcg_cleanup_cache_params(s) != 0)
		goto out_unlock;

	if (__kmem_cache_shutdown(s) != 0) {
		printk(KERN_ERR "kmem_cache_destroy %s: "
		       "Slab cache still has objects\n", s->name);
		dump_stack();
		goto out_unlock;
	}

	list_del(&s->list);

	mutex_unlock(&slab_mutex);
	if (s->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();

	memcg_free_cache_params(s);
#ifdef SLAB_SUPPORTS_SYSFS
	sysfs_slab_remove(s);
#else
	slab_kmem_cache_release(s);
#endif
	goto out;

out_unlock:
	mutex_unlock(&slab_mutex);
out:
	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
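
/*
 * Ordering note with a sketch (illustrative): for a cache created with
 * SLAB_DESTROY_BY_RCU, the rcu_barrier() above makes a plain
 *
 *	kmem_cache_free(cachep, obj);
 *	kmem_cache_destroy(cachep);
 *
 * safe, since all RCU-deferred slab page frees have completed before
 * the cache itself is torn down.
 */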

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}
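
/*
 * Worked example (illustrative): a 100-byte request gives
 * size_index_elem(100) = (100 - 1) / 8 = 12, and size_index[12] == 7
 * selects the 128-byte kmalloc cache; a 28-byte request maps to element
 * (28 - 1) / 8 = 3, whose value 5 selects the 32-byte cache.
 */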

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
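
/*
 * Illustrative outcomes (assuming a default configuration): a 300-byte
 * request is above the table, so index = fls(299) = 9 and the 512-byte
 * cache is returned; size 0 yields ZERO_SIZE_PTR; and with
 * CONFIG_ZONE_DMA plus GFP_DMA the parallel kmalloc_dma_caches[] array
 * is used instead.
 */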

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
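
	/*
	 * Worked example of the patch-up above (illustrative): with
	 * KMALLOC_MIN_SIZE == 64 (KMALLOC_SHIFT_LOW == 6), requests of
	 * 8..56 bytes are redirected to the 64-byte cache and 72..96
	 * bytes to the 128-byte cache, since neither the smaller caches
	 * nor the 96-byte cache exist in that configuration.
	 */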
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i]) {
			kmalloc_caches[i] = create_kmalloc_cache(NULL,
							1 << i, flags);
		}

		/*
		 * Caches that are not of a power-of-two size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
		char *n;

		if (s) {
			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

			BUG_ON(!n);
			s->name = n;
		}
	}

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
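
/*
 * Illustrative example: with SLUB, a kmalloc(16384, GFP_KERNEL) on a
 * 4K-page system is handed to the page allocator as an order-2 request
 * through this path. __GFP_COMP makes the result a compound page, so
 * kfree() can recover the order from the head page and return all four
 * pages at once.
 */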

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;
	int i;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(s, i);
		if (!c)
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}
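
/*
 * A resulting /proc/slabinfo line might look like (values illustrative,
 * wrapped here for width; the real output is a single line):
 *
 *   kmalloc-64        74880  74880     64   64    1 : tunables 0 0 0
 *       : slabdata 1170 1170 0
 */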

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params->memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write          = slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
						&proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);

}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
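
/*
 * Illustrative usage sketch (hypothetical names): on failure krealloc()
 * returns NULL and leaves the old buffer allocated, so overwriting the
 * only pointer to it would leak it. A common pattern:
 *
 *	tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp) {
 *		kfree(buf);
 *		return -ENOMEM;
 *	}
 *	buf = tmp;
 */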

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
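
/*
 * Illustrative use (hypothetical names): wiping key material before
 * freeing. Because the whole ksize() region is cleared, any slack beyond
 * the requested size is wiped as well.
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);
 */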

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);