/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a pointer to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	if (kmem_cache_sanity_check(name, size))
		goto out_locked;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_locked;

	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		s->object_size = s->size = size;
		s->align = align;
		s->ctor = ctor;
		s->name = kstrdup(name, GFP_KERNEL);
		if (!s->name) {
			kmem_cache_free(kmem_cache, s);
			err = -ENOMEM;
			goto out_locked;
		}

		err = __kmem_cache_create(s, flags);
		if (!err) {
			s->refcount = 1;
			list_add(&s->list, &slab_caches);
		} else {
			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		}
	} else {
		err = -ENOMEM;
	}

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC) {
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		} else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

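/*
 * Illustrative usage sketch (not part of this file): how a typical caller,
 * e.g. module init/exit code, would use the API above together with
 * kmem_cache_destroy() below. The names struct foo, "foo_cache" and
 * foo_cachep are hypothetical placeholders, not existing kernel symbols.
 *
 *	struct foo { int a; void *b; };
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *
 * kmem_cache_destroy(foo_cachep) then releases the cache itself, typically
 * from the module's exit path once all objects have been freed.
 */
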
void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			mutex_unlock(&slab_mutex);
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
			mutex_unlock(&slab_mutex);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	} else {
		mutex_unlock(&slab_mutex);
	}
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&slab_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
	return 0;
}

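/*
 * For reference, the output produced by print_slabinfo_header() and s_show()
 * above looks roughly like the following. The "dentry" line is illustrative,
 * with made-up but self-consistent numbers, not taken from a real system:
 *
 *	slabinfo - version: 2.1
 *	# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
 *	dentry             10215  10500    192   21    1 : tunables    0    0    0 : slabdata    500    500      0
 */
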
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */