Lines matching "i", "cache", "size" in mm/slab_common.c

1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/cache.h>
In kmem_cache_size():
 79  * Determine the size of a slab object
 83 	return s->object_size;
In kmem_cache_sanity_check():
 88 static int kmem_cache_sanity_check(const char *name, unsigned int size)
 90 	if (!name || in_interrupt() || size < sizeof(void *) ||
 91 	    size > KMALLOC_MAX_SIZE) {
 93 		return -EINVAL;
100 static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
(the inline variant at line 100 is the stub used when CONFIG_DEBUG_VM is disabled)
In __kmem_cache_free_bulk():
108 	size_t i;
110 	for (i = 0; i < nr; i++) {
112 			kmem_cache_free(s, p[i]);
114 			kfree(p[i]);
In __kmem_cache_alloc_bulk():
121 	size_t i;
123 	for (i = 0; i < nr; i++) {
124 		void *x = p[i] = kmem_cache_alloc(s, flags);
126 			__kmem_cache_free_bulk(s, i, p);
130 	return i;
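
A minimal usage sketch (not from this file) of the bulk API these generic fallbacks implement; the cache, object type, and batch size are hypothetical, and the sketch assumes the documented contract that kmem_cache_alloc_bulk() returns 0 on failure:

	#include <linux/slab.h>

	struct foo { int x; };			/* hypothetical object type */

	static int foo_alloc_batch(struct kmem_cache *foo_cache)
	{
		void *objs[16];

		/* Allocate 16 objects in a single call. */
		if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 16, objs))
			return -ENOMEM;

		/* ... use the objects ... */

		/* Return all 16 to the cache in a single call. */
		kmem_cache_free_bulk(foo_cache, 16, objs);
		return 0;
	}
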
In calculate_alignment():
135  * flags, a user specified alignment and the size of the objects.
138 		unsigned int align, unsigned int size)
141 	 * If the user wants hardware cache aligned objects then follow that
144 	 * The hardware cache alignment cannot override the specified
151 		while (size <= ralign / 2)
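 
The effect of that halving loop can be seen in a standalone sketch (a userspace approximation, assuming a 64-byte cache line; calc_align() is a hypothetical name):

	#include <stdio.h>

	static unsigned int calc_align(unsigned int align, unsigned int size)
	{
		unsigned int ralign = 64;	/* assumed cache_line_size() */

		/* Halve the alignment while objects are small enough that
		 * two or more of them fit in one cache line anyway. */
		while (size <= ralign / 2)
			ralign /= 2;
		if (align < ralign)
			align = ralign;
		/* The kernel additionally rounds the result up to a
		 * multiple of sizeof(void *). */
		return align;
	}

	int main(void)
	{
		printf("size 24  -> align %u\n", calc_align(8, 24));	/* 32 */
		printf("size 200 -> align %u\n", calc_align(8, 200));	/* 64 */
		return 0;
	}
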
In slab_unmergeable():
163  * Find a mergeable slab cache
167 	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
170 	if (s->ctor)
173 	if (s->usersize)
179 	if (s->refcount < 0)
In find_mergeable():
185 struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
196 	size = ALIGN(size, sizeof(void *));
197 	align = calculate_alignment(flags, align, size);
198 	size = ALIGN(size, align);
199 	flags = kmem_cache_flags(size, flags, name, NULL);
208 		if (size > s->size)
211 		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
217 		if ((s->size & ~(align - 1)) != s->size)
220 		if (s->size - size >= sizeof(void *))
224 		    (align > s->align || s->align % align))
In create_cache():
244 	err = -ENOMEM;
249 	s->name = name;
250 	s->size = s->object_size = object_size;
251 	s->align = align;
252 	s->ctor = ctor;
253 	s->useroffset = useroffset;
254 	s->usersize = usersize;
260 	s->refcount = 1;
261 	list_add(&s->list, &slab_caches);
Kernel-doc for kmem_cache_create_usercopy():
273  * kmem_cache_create_usercopy - Create a cache with a region suitable for copying to userspace
275  * @name: A string which is used in /proc/slabinfo to identify this cache.
276  * @size: The size of objects to be created in this cache.
280  * @usersize: Usercopy region size
284  * The @ctor is run when new pages are allocated by the cache.
288  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
291  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check for buffer overruns.
294  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware cacheline.
298  * Return: a pointer to the cache on success, NULL on failure.
In kmem_cache_create_usercopy():
302 		unsigned int size, unsigned int align,
316 	err = kmem_cache_sanity_check(name, size);
323 		err = -EINVAL;
337 	    WARN_ON(size < usersize || size - usersize < useroffset))
341 	s = __kmem_cache_alias(name, size, align, flags, ctor);
347 		err = -ENOMEM;
351 	s = create_cache(cache_name, size,
352 			 calculate_alignment(flags, align, size),
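
A hedged sketch of how a caller might carve out a usercopy region with this API; the struct, field, and cache name are all hypothetical:

	#include <linux/slab.h>
	#include <linux/stddef.h>

	/* Only the token[] field may be copied to/from userspace. */
	struct session {
		char token[16];		/* exposed to copy_to_user() */
		void *private;		/* must stay kernel-only */
	};

	static struct kmem_cache *session_cache;

	static int __init session_init(void)
	{
		session_cache = kmem_cache_create_usercopy("session_cache",
				sizeof(struct session), 0, SLAB_HWCACHE_ALIGN,
				offsetof(struct session, token),	/* useroffset */
				sizeof_field(struct session, token),	/* usersize */
				NULL);
		return session_cache ? 0 : -ENOMEM;
	}

With hardened usercopy enabled, copies that stray outside the declared [useroffset, useroffset + usersize) window are detected.
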
Kernel-doc for kmem_cache_create():
381  * kmem_cache_create - Create a cache.
382  * @name: A string which is used in /proc/slabinfo to identify this cache.
383  * @size: The size of objects to be created in this cache.
389  * The @ctor is run when new pages are allocated by the cache.
393  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
396  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check for buffer overruns.
399  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware cacheline.
403  * Return: a pointer to the cache on success, NULL on failure.
In kmem_cache_create():
406 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
409 	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0, ctor);
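
A minimal lifecycle sketch for the plain API (the struct and cache name are hypothetical, not from this file):

	#include <linux/init.h>
	#include <linux/slab.h>

	struct widget { int id; };

	static struct kmem_cache *widget_cache;

	static int __init widget_init(void)
	{
		widget_cache = kmem_cache_create("widget_cache",
				sizeof(struct widget), 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!widget_cache)
			return -ENOMEM;
		return 0;
	}

	static void __exit widget_exit(void)
	{
		/* All objects must have been freed back before destroy. */
		kmem_cache_destroy(widget_cache);
	}

Objects are then obtained with kmem_cache_alloc(widget_cache, GFP_KERNEL) and returned with kmem_cache_free().
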
In shutdown_cache():
452 		return -EBUSY;
454 	list_del(&s->list);
456 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
460 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);

In slab_kmem_cache_release():
477 	kfree_const(s->name);
In kmem_cache_destroy():
493 	s->refcount--;
494 	if (s->refcount)
499 		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
500 		       s->name);
Kernel-doc for kmem_cache_shrink():
512  * kmem_cache_shrink - Shrink a cache.
513  * @cachep: The cache to shrink.
515  * Releases as many slabs as possible for a cache.
518  * Return: %0 if all slabs were released, non-zero otherwise
In create_boot_cache():
540 /* Create a cache during boot when no slab services are available yet */
542 		unsigned int size, slab_flags_t flags,
548 	s->name = name;
549 	s->size = s->object_size = size;
555 	if (is_power_of_2(size))
556 		align = max(align, size);
557 	s->align = calculate_alignment(flags, align, size);
559 	s->useroffset = useroffset;
560 	s->usersize = usersize;
565 		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
566 		      name, size, err);
568 	s->refcount = -1;	/* Exempt from merging for now */
In create_kmalloc_cache():
572 		unsigned int size, slab_flags_t flags,
580 	create_boot_cache(s, name, size, flags, useroffset, usersize);
581 	list_add(&s->list, &slab_caches);
582 	s->refcount = 1;
Comment above the size_index[] conversion table:
594  * of two cache sizes there. The size of larger slabs can be determined using fls.

In size_index_elem():
626 	return (bytes - 1) / 8;
In kmalloc_slab():
630  * Find the kmem_cache structure that serves a given size of allocation request
633 struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
637 	if (size <= 192) {
638 		if (!size)
641 		index = size_index[size_index_elem(size)];
643 	if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
645 		index = fls(size - 1);
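
A standalone userspace sketch of the two lookup paths above: a table for requests of 192 bytes or less (which knows about the odd 96- and 192-byte caches), fls(size - 1) for anything larger. The table values mirror the kernel's defaults for KMALLOC_MIN_SIZE == 8; this is illustrative, not the kernel's code:

	#include <stdio.h>

	/* kmalloc cache index per (size-1)/8 slot: 1 = kmalloc-96,
	 * 2 = kmalloc-192, n >= 3 = kmalloc-(1 << n). */
	static unsigned char size_index[24] = {
		3, 4, 5, 5, 6, 6, 6, 6,		/*   8..64  */
		1, 1, 1, 1, 7, 7, 7, 7,		/*  72..128 */
		2, 2, 2, 2, 2, 2, 2, 2,		/* 136..192 */
	};

	static int fls_(size_t x)	/* 1-based index of highest set bit */
	{
		int n = 0;
		for (; x; x >>= 1)
			n++;
		return n;
	}

	static int kmalloc_index_sketch(size_t size)
	{
		if (size <= 192)
			return size ? size_index[(size - 1) / 8] : -1;
		return fls_(size - 1);
	}

	int main(void)
	{
		printf("%d\n", kmalloc_index_sketch(100));	/* 7 -> kmalloc-128 */
		printf("%d\n", kmalloc_index_sketch(96));	/* 1 -> kmalloc-96  */
		printf("%d\n", kmalloc_index_sketch(200));	/* 8 -> kmalloc-256 */
		return 0;
	}
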
In the INIT_KMALLOC_INFO() macro:
654 	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,		\
655 	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,		\
656 	.name[KMALLOC_DMA]     = "dma-kmalloc-" #__short_size,		\
657 	.size = __size,							\
662 	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,		\
663 	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,		\
664 	.size = __size,							\
(the second variant, without a KMALLOC_DMA name, is the !CONFIG_ZONE_DMA build)
Comment above kmalloc_info[]:
669  * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
671  * kmalloc_index() supports up to 2^26 = 64 MB, so the final entry is kmalloc-67108864.
In setup_kmalloc_cache_index_table():
716 	unsigned int i;
719 		     (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
721 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
722 		unsigned int elem = size_index_elem(i);
731 	 * The 96 byte size cache is not used if the alignment is 64 byte.
734 	for (i = 64 + 8; i <= 96; i += 8)
735 		size_index[size_index_elem(i)] = 7;
741 	 * The 192 byte sized cache is not used if the alignment
742 	 * is 128 byte. Redirect kmalloc to use the 256 byte cache instead.
745 	for (i = 128 + 8; i <= 192; i += 8)
746 		size_index[size_index_elem(i)] = 8;
In new_kmalloc_cache():
758 		kmalloc_info[idx].size, flags, 0,
759 		kmalloc_info[idx].size);
In create_kmalloc_caches():
769 	int i;
773 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
774 		if (!kmalloc_caches[type][i])
775 			new_kmalloc_cache(i, type, flags);
778 	 * Caches that are not of the two-to-the-power-of size.
782 		if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
785 		if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
795 	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
796 		struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
799 			kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
800 				kmalloc_info[i].name[KMALLOC_DMA],
801 				kmalloc_info[i].size,
803 				kmalloc_info[i].size);
In kmalloc_order():
827 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
842 	ret = kasan_kmalloc_large(ret, size, flags);
844 	kmemleak_alloc(ret, size, 1, flags);

In kmalloc_order_trace():
850 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
852 	void *ret = kmalloc_order(size, flags, order);
853 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
In freelist_randomize():
865 	unsigned int i;
867 	for (i = 0; i < count; i++)
868 		list[i] = i;
870 	/* Fisher-Yates shuffle */
871 	for (i = count - 1; i > 0; i--) {
873 		rand %= (i + 1);
874 		swap(list[i], list[rand]);
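
A standalone sketch of the same Fisher-Yates shuffle, using rand() in place of the kernel's seeded random-state source; the function names are illustrative:

	#include <stdio.h>
	#include <stdlib.h>

	static void shuffle(unsigned int *list, unsigned int count)
	{
		unsigned int i, j, tmp;

		for (i = 0; i < count; i++)
			list[i] = i;		/* identity sequence */

		/* Walk backwards, swapping each slot with a randomly
		 * chosen earlier slot (or itself); every permutation is
		 * equally likely. */
		for (i = count - 1; i > 0; i--) {
			j = (unsigned int)rand() % (i + 1);
			tmp = list[i]; list[i] = list[j]; list[j] = tmp;
		}
	}

	int main(void)
	{
		unsigned int seq[8], i;

		shuffle(seq, 8);
		for (i = 0; i < 8; i++)
			printf("%u ", seq[i]);
		printf("\n");
		return 0;
	}
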
In cache_random_seq_create():
878 /* Create a random sequence per cache */
884 	if (count < 2 || cachep->random_seq)
887 	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
888 	if (!cachep->random_seq)
889 		return -ENOMEM;
894 	freelist_randomize(&state, cachep->random_seq, count);
In cache_random_seq_destroy():
898 /* Destroy the per-cache random freelist sequence */
901 	kfree(cachep->random_seq);
902 	cachep->random_seq = NULL;
In print_slabinfo_header():
920 		seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
922 		seq_puts(m, "slabinfo - version: 2.1\n");
(the first variant is printed when slab statistics are compiled in)
In cache_show():
957 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
958 		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
In dump_unreclaimable_slab():
1000 		if (s->flags & SLAB_RECLAIM_ACCOUNT)
1006 		pr_info("%-17s %10luKB %10luKB\n", s->name,
1007 			(sinfo.active_objs * s->size) / 1024,
1008 			(sinfo.num_objs * s->size) / 1024);
Comment describing the /proc/slabinfo output layout:
1025  * slabinfo_op - iterator that generates /proc/slabinfo
1028  * cache-name
1029  * num-active-objs
1030  * total-objs
1031  * object size
1032  * num-active-slabs
1033  * total-slabs
1034  * num-pages-per-slab
Kernel-doc for krealloc():
1088  * krealloc - reallocate memory. The contents will remain unchanged.
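
A minimal sketch of growing a buffer with krealloc(); the helper name is hypothetical. On failure krealloc() returns NULL and leaves the original block intact, so the old pointer must be kept until the call succeeds:

	#include <linux/slab.h>

	static int grow_buffer(char **buf, size_t new_len)
	{
		char *tmp = krealloc(*buf, new_len, GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;	/* *buf is still valid and unchanged */
		*buf = tmp;
		return 0;
	}
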
Kernel-doc for kfree_sensitive():
1118  * kfree_sensitive - Clear sensitive information in memory before freeing
1125  * deal bigger than the requested buffer size passed to kmalloc(). So be
      * careful when using this function in performance sensitive code.
Kernel-doc and body of ksize():
1141  * ksize - get the actual amount of memory allocated for a given object
1152  * Return: size of the actual memory used by @objp in bytes
1156 	size_t size;
1163 	 * use-after-free or double-free).
1174 	size = __ksize(objp);
1179 	kasan_unpoison_shadow(objp, size);
1180 	return size;
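
A small sketch of using ksize() to take advantage of kmalloc() round-up slack; the function name is illustrative, and the 100/128 figure assumes the request is served from the kmalloc-128 cache:

	#include <linux/slab.h>

	static void *alloc_and_report(size_t want)
	{
		void *p = kmalloc(want, GFP_KERNEL);

		if (p)
			/* e.g. want = 100 typically yields ksize(p) == 128 */
			pr_debug("asked for %zu, usable %zu\n", want, ksize(p));
		return p;
	}
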
In should_failslab():
1195 		return -ENOMEM;