/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

#if !defined(CONFIG_SLUB)
		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
#endif
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;
	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		ret = memcg_update_cache_size(s, num_memcgs);
		/*
		 * See comment in memcontrol.c, memcg_update_cache_size:
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			goto out;
	}

	memcg_update_array_size(num_memcgs);
out:
	mutex_unlock(&slab_mutex);
	return ret;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user-specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
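
/*
 * Worked example (illustrative, cache line size assumed to be 64 bytes):
 * with SLAB_HWCACHE_ALIGN, a 24-byte object and no explicit alignment,
 * ralign is halved from 64 to 32 because 24 <= 32, and the loop then stops
 * because 24 > 16, so the object ends up 32-byte aligned instead of being
 * padded out to a full cache line:
 *
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 24)
 *		ralign = 64 -> 32
 *		align  = max(0, 32) = 32
 *		return ALIGN(32, sizeof(void *)) = 32
 */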

static struct kmem_cache *
do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
		     unsigned long flags, void (*ctor)(void *),
		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = memcg_alloc_cache_params(memcg, s, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	memcg_free_cache_params(s);
	kfree(s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;
	char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_unlock;

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = do_kmem_cache_create(cache_name, size, size,
				 calculate_alignment(flags, align, size),
				 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
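
/*
 * Usage sketch (illustrative; "struct foo" and foo_cachep are made-up
 * names): a typical caller creates the cache once, allocates and frees
 * objects from it, and destroys it on teardown:
 *
 *	struct foo { unsigned long state; struct list_head list; };
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */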

#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 * @memcg_name: The name of the memory cgroup (used for naming the new cache).
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
					   struct kmem_cache *root_cache,
					   const char *memcg_name)
{
	struct kmem_cache *s = NULL;
	char *cache_name;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       memcg_cache_id(memcg), memcg_name);
	if (!cache_name)
		goto out_unlock;

	s = do_kmem_cache_create(cache_name, root_cache->object_size,
				 root_cache->size, root_cache->align,
				 root_cache->flags, root_cache->ctor,
				 memcg, root_cache);
	if (IS_ERR(s)) {
		kfree(cache_name);
		s = NULL;
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	return s;
}

static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	int rc;

	if (!s->memcg_params ||
	    !s->memcg_params->is_root_cache)
		return 0;

	mutex_unlock(&slab_mutex);
	rc = __memcg_cleanup_cache_params(s);
	mutex_lock(&slab_mutex);

	return rc;
}
#else
static int memcg_cleanup_cache_params(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	kfree(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	if (memcg_cleanup_cache_params(s) != 0)
		goto out_unlock;

	if (__kmem_cache_shutdown(s) != 0) {
		printk(KERN_ERR "kmem_cache_destroy %s: "
		       "Slab cache still has objects\n", s->name);
		dump_stack();
		goto out_unlock;
	}

	list_del(&s->list);

	mutex_unlock(&slab_mutex);
	if (s->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();

	memcg_free_cache_params(s);
#ifdef SLAB_SUPPORTS_SYSFS
	sysfs_slab_remove(s);
#else
	slab_kmem_cache_release(s);
#endif
	goto out;

out_unlock:
	mutex_unlock(&slab_mutex);
out:
	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have
 * non-power-of-two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
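
/*
 * Worked example (illustrative, default size_index table): a 100-byte
 * request is <= 192, so the index comes from the table above rather than
 * from fls():
 *
 *	size_index_elem(100) = (100 - 1) / 8 = 12
 *	size_index[12] = 7  ->  kmalloc_caches[7], the 128-byte cache
 *
 * A 1000-byte request takes the fls() path instead: fls(999) = 10, so it is
 * served from kmalloc_caches[10], the 1024-byte cache.
 */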

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i]) {
			kmalloc_caches[i] = create_kmalloc_cache(NULL,
							1 << i, flags);
		}

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
		char *n;

		if (s) {
			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));

			BUG_ON(!n);
			s->name = n;
		}
	}

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
					    "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif
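
/*
 * Illustrative example (assumes SLUB and 4 KiB pages): kmalloc(64 * 1024,
 * GFP_KERNEL) is too large for the kmalloc caches and ends up here as
 * kmalloc_order(65536, GFP_KERNEL, 4), i.e. one order-4 compound page
 * (16 pages). Because the page is compound, kfree() can later read the
 * order back and return the whole allocation to the page allocator.
 */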

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&slab_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;
	int i;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache_index(i) {
		c = cache_from_memcg_idx(s, i);
		if (!c)
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

int cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
	return 0;
}
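
/*
 * Example of a resulting /proc/slabinfo line (values are illustrative):
 *
 *	kmalloc-128    1184  1216  128  32  1 : tunables 0 0 0 : slabdata 38 38 0
 *
 * i.e. cache name, active and total objects, object size, objects per slab,
 * pages per slab, then the tunables and slabdata columns taken from
 * struct slabinfo.
 */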

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (!is_root_cache(s))
		return 0;
	return cache_show(s, m);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer
 * immediately, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
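
/*
 * Usage sketch (illustrative; "buf" and "new_len" are hypothetical names):
 * growing an existing allocation while keeping its contents. On failure
 * krealloc() returns NULL and leaves the old buffer intact, so the caller
 * must keep the original pointer around:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;		(buf is still valid and still owned)
 *	buf = new;
 */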

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);