xref: /linux/mm/slab_common.c (revision 945cf2b6199be70ff03102b9e642c3bb05d01de9)
1039363f3SChristoph Lameter /*
2039363f3SChristoph Lameter  * Slab allocator functions that are independent of the allocator strategy
3039363f3SChristoph Lameter  *
4039363f3SChristoph Lameter  * (C) 2012 Christoph Lameter <cl@linux.com>
5039363f3SChristoph Lameter  */
6039363f3SChristoph Lameter #include <linux/slab.h>
7039363f3SChristoph Lameter 
8039363f3SChristoph Lameter #include <linux/mm.h>
9039363f3SChristoph Lameter #include <linux/poison.h>
10039363f3SChristoph Lameter #include <linux/interrupt.h>
11039363f3SChristoph Lameter #include <linux/memory.h>
12039363f3SChristoph Lameter #include <linux/compiler.h>
13039363f3SChristoph Lameter #include <linux/module.h>
1420cea968SChristoph Lameter #include <linux/cpu.h>
1520cea968SChristoph Lameter #include <linux/uaccess.h>
16039363f3SChristoph Lameter #include <asm/cacheflush.h>
17039363f3SChristoph Lameter #include <asm/tlbflush.h>
18039363f3SChristoph Lameter #include <asm/page.h>
19039363f3SChristoph Lameter 
2097d06609SChristoph Lameter #include "slab.h"
2197d06609SChristoph Lameter 
/* Bootstrap progress of the slab allocator; see enum slab_state in slab.h. */
enum slab_state slab_state;
/* List of all registered caches; walked under slab_mutex (see sanity check). */
LIST_HEAD(slab_caches);
/* Serializes cache creation/destruction and protects slab_caches. */
DEFINE_MUTEX(slab_mutex);
2677be4b13SShuah Khan #ifdef CONFIG_DEBUG_VM
2777be4b13SShuah Khan static int kmem_cache_sanity_check(const char *name, size_t size)
2877be4b13SShuah Khan {
2977be4b13SShuah Khan 	struct kmem_cache *s = NULL;
3077be4b13SShuah Khan 
3177be4b13SShuah Khan 	if (!name || in_interrupt() || size < sizeof(void *) ||
3277be4b13SShuah Khan 		size > KMALLOC_MAX_SIZE) {
3377be4b13SShuah Khan 		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
3477be4b13SShuah Khan 		return -EINVAL;
3577be4b13SShuah Khan 	}
3677be4b13SShuah Khan 
3777be4b13SShuah Khan 	list_for_each_entry(s, &slab_caches, list) {
3877be4b13SShuah Khan 		char tmp;
3977be4b13SShuah Khan 		int res;
4077be4b13SShuah Khan 
4177be4b13SShuah Khan 		/*
4277be4b13SShuah Khan 		 * This happens when the module gets unloaded and doesn't
4377be4b13SShuah Khan 		 * destroy its slab cache and no-one else reuses the vmalloc
4477be4b13SShuah Khan 		 * area of the module.  Print a warning.
4577be4b13SShuah Khan 		 */
4677be4b13SShuah Khan 		res = probe_kernel_address(s->name, tmp);
4777be4b13SShuah Khan 		if (res) {
4877be4b13SShuah Khan 			pr_err("Slab cache with size %d has lost its name\n",
4977be4b13SShuah Khan 			       s->object_size);
5077be4b13SShuah Khan 			continue;
5177be4b13SShuah Khan 		}
5277be4b13SShuah Khan 
5377be4b13SShuah Khan 		if (!strcmp(s->name, name)) {
5477be4b13SShuah Khan 			pr_err("%s (%s): Cache name already exists.\n",
5577be4b13SShuah Khan 			       __func__, name);
5677be4b13SShuah Khan 			dump_stack();
5777be4b13SShuah Khan 			s = NULL;
5877be4b13SShuah Khan 			return -EINVAL;
5977be4b13SShuah Khan 		}
6077be4b13SShuah Khan 	}
6177be4b13SShuah Khan 
6277be4b13SShuah Khan 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
6377be4b13SShuah Khan 	return 0;
6477be4b13SShuah Khan }
6577be4b13SShuah Khan #else
6677be4b13SShuah Khan static inline int kmem_cache_sanity_check(const char *name, size_t size)
6777be4b13SShuah Khan {
6877be4b13SShuah Khan 	return 0;
6977be4b13SShuah Khan }
7077be4b13SShuah Khan #endif
7177be4b13SShuah Khan 
72039363f3SChristoph Lameter /*
73039363f3SChristoph Lameter  * kmem_cache_create - Create a cache.
74039363f3SChristoph Lameter  * @name: A string which is used in /proc/slabinfo to identify this cache.
75039363f3SChristoph Lameter  * @size: The size of objects to be created in this cache.
76039363f3SChristoph Lameter  * @align: The required alignment for the objects.
77039363f3SChristoph Lameter  * @flags: SLAB flags
78039363f3SChristoph Lameter  * @ctor: A constructor for the objects.
79039363f3SChristoph Lameter  *
80039363f3SChristoph Lameter  * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
82039363f3SChristoph Lameter  * The @ctor is run when new pages are allocated by the cache.
83039363f3SChristoph Lameter  *
84039363f3SChristoph Lameter  * The flags are
85039363f3SChristoph Lameter  *
86039363f3SChristoph Lameter  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
87039363f3SChristoph Lameter  * to catch references to uninitialised memory.
88039363f3SChristoph Lameter  *
89039363f3SChristoph Lameter  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
90039363f3SChristoph Lameter  * for buffer overruns.
91039363f3SChristoph Lameter  *
92039363f3SChristoph Lameter  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
93039363f3SChristoph Lameter  * cacheline.  This can be beneficial if you're counting cycles as closely
94039363f3SChristoph Lameter  * as davem.
95039363f3SChristoph Lameter  */
96039363f3SChristoph Lameter 
97039363f3SChristoph Lameter struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
98039363f3SChristoph Lameter 		unsigned long flags, void (*ctor)(void *))
99039363f3SChristoph Lameter {
100039363f3SChristoph Lameter 	struct kmem_cache *s = NULL;
101686d550dSChristoph Lameter 	int err = 0;
102039363f3SChristoph Lameter 
103b920536aSPekka Enberg 	get_online_cpus();
104b920536aSPekka Enberg 	mutex_lock(&slab_mutex);
105686d550dSChristoph Lameter 
106686d550dSChristoph Lameter 	if (!kmem_cache_sanity_check(name, size) == 0)
107686d550dSChristoph Lameter 		goto out_locked;
108686d550dSChristoph Lameter 
109686d550dSChristoph Lameter 
110039363f3SChristoph Lameter 	s = __kmem_cache_create(name, size, align, flags, ctor);
111686d550dSChristoph Lameter 	if (!s)
112686d550dSChristoph Lameter 		err = -ENOSYS; /* Until __kmem_cache_create returns code */
113686d550dSChristoph Lameter 
1147c9adf5aSChristoph Lameter 	/*
1157c9adf5aSChristoph Lameter 	 * Check if the slab has actually been created and if it was a
1167c9adf5aSChristoph Lameter 	 * real instatiation. Aliases do not belong on the list
1177c9adf5aSChristoph Lameter 	 */
1187c9adf5aSChristoph Lameter 	if (s && s->refcount == 1)
1197c9adf5aSChristoph Lameter 		list_add(&s->list, &slab_caches);
1207c9adf5aSChristoph Lameter 
121686d550dSChristoph Lameter out_locked:
12220cea968SChristoph Lameter 	mutex_unlock(&slab_mutex);
12320cea968SChristoph Lameter 	put_online_cpus();
12420cea968SChristoph Lameter 
125686d550dSChristoph Lameter 	if (err) {
126686d550dSChristoph Lameter 
127686d550dSChristoph Lameter 		if (flags & SLAB_PANIC)
128686d550dSChristoph Lameter 			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
129686d550dSChristoph Lameter 				name, err);
130686d550dSChristoph Lameter 		else {
131686d550dSChristoph Lameter 			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
132686d550dSChristoph Lameter 				name, err);
133686d550dSChristoph Lameter 			dump_stack();
134686d550dSChristoph Lameter 		}
135686d550dSChristoph Lameter 
136686d550dSChristoph Lameter 		return NULL;
137686d550dSChristoph Lameter 	}
138039363f3SChristoph Lameter 
139039363f3SChristoph Lameter 	return s;
140039363f3SChristoph Lameter }
141039363f3SChristoph Lameter EXPORT_SYMBOL(kmem_cache_create);
14297d06609SChristoph Lameter 
/*
 * kmem_cache_destroy - drop a reference on a cache and destroy it when
 * the last reference goes away.
 *
 * The refcount accounts for cache aliases created via kmem_cache_create();
 * only the final put actually tears the cache down.  All list and refcount
 * manipulation happens under slab_mutex with CPU hotplug held off.
 */
void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		/* Unlink first so concurrent creators no longer see the name. */
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			/*
			 * For RCU-freed caches, wait for all pending RCU
			 * callbacks before tearing down the backing storage.
			 * NOTE(review): rcu_barrier() runs here while still
			 * holding slab_mutex — can stall other cache
			 * create/destroy; confirm this ordering is intended.
			 */
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			__kmem_cache_destroy(s);
		} else {
			/*
			 * Shutdown failed (objects still allocated): put the
			 * cache back on the list and leave it alive.
			 */
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
167*945cf2b6SChristoph Lameter 
16897d06609SChristoph Lameter int slab_is_available(void)
16997d06609SChristoph Lameter {
17097d06609SChristoph Lameter 	return slab_state >= UP;
17197d06609SChristoph Lameter }
172