Lines matching "page" and "size" in mm/slub.c
1 // SPDX-License-Identifier: GPL-2.0
32 #include <linux/fault-inject.h>
45 * 2. node->list_lock
46 * 3. slab_lock(page) (Only on some arches and for debugging)
55 * A. page->freelist -> List of free objects in a page
56 * B. page->inuse -> Number of objects in use
57 * C. page->objects -> Number of objects in page
58 * D. page->frozen -> frozen state
62 * slab is the one who can perform list operations on the page. Other
65 * page's freelist.
93 * minimal so we rely on the page allocator's per cpu caches for
96 * page->frozen The slab is frozen and exempt from list processing.
133 p += s->red_left_pad; in fixup_red_left()
150 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
152 * - Variable sizing of the per node arrays
193 #define OO_MASK ((1 << OO_SHIFT) - 1)
194 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
232 * avoid this_cpu_add()'s irq-disable overhead. in stat()
234 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
244 * with an XOR of the address where the pointer is held and a per-cache
261 return (void *)((unsigned long)ptr ^ s->random ^ in freelist_ptr()
278 return freelist_dereference(s, object + s->offset); in get_freepointer()
283 prefetch(object + s->offset); in prefetch_freepointer()
294 freepointer_addr = (unsigned long)object + s->offset; in get_freepointer_safe()
301 unsigned long freeptr_addr = (unsigned long)object + s->offset; in set_freepointer()
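
/*
 * Illustration (not part of slub.c): with CONFIG_SLAB_FREELIST_HARDENED,
 * freelist_ptr() above XORs the stored "next free" pointer with a
 * per-cache random value and a byte-swapped copy of the slot address,
 * so a stray heap overwrite cannot easily forge a usable pointer.
 * Minimal user-space sketch of the XOR round trip; the key below and
 * all sketch_* names are made up for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

static uintptr_t sketch_swab(uintptr_t x)
{
	/* crude byte reversal, standing in for the kernel's swab() */
	uintptr_t r = 0;
	for (unsigned int i = 0; i < sizeof(x); i++)
		r = (r << 8) | ((x >> (8 * i)) & 0xff);
	return r;
}

static void *sketch_freelist_ptr(void *ptr, uintptr_t slot_addr, uintptr_t key)
{
	/* the same transform encodes and decodes, since XOR is an involution */
	return (void *)((uintptr_t)ptr ^ key ^ sketch_swab(slot_addr));
}

int main(void)
{
	char object[64];
	uintptr_t key = (uintptr_t)0x5eedc0de;	/* stand-in for s->random */
	void *next = object + 32;		/* pretend next free object */
	uintptr_t slot = (uintptr_t)object;	/* where the pointer is stored */

	void *stored  = sketch_freelist_ptr(next, slot, key);
	void *decoded = sketch_freelist_ptr(stored, slot, key);

	printf("obfuscated %p, round-trip ok: %d\n", stored, decoded == next);
	return 0;
}
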
313 __p < (__addr) + (__objects) * (__s)->size; \
314 __p += (__s)->size)
316 static inline unsigned int order_objects(unsigned int order, unsigned int size) in order_objects() argument
318 return ((unsigned int)PAGE_SIZE << order) / size; in order_objects()
322 unsigned int size) in oo_make() argument
325 (order << OO_SHIFT) + order_objects(order, size) in oo_make()
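
/*
 * Illustration (not part of slub.c): oo_make() above packs the page
 * order and the number of objects per slab into a single unsigned int;
 * the companion helpers unpack it with OO_SHIFT/OO_MASK.  User-space
 * sketch assuming a 4096-byte page and OO_SHIFT == 16 as in mainline;
 * the sketch_* names are made up.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_OO_SHIFT  16
#define SKETCH_OO_MASK   ((1u << SKETCH_OO_SHIFT) - 1)

static unsigned int sketch_order_objects(unsigned int order, unsigned int size)
{
	return (SKETCH_PAGE_SIZE << order) / size;	/* objects fitting in 2^order pages */
}

static unsigned int sketch_oo_make(unsigned int order, unsigned int size)
{
	return (order << SKETCH_OO_SHIFT) + sketch_order_objects(order, size);
}

int main(void)
{
	unsigned int oo = sketch_oo_make(1, 192);	/* order-1 slab of 192-byte objects */

	printf("order   = %u\n", oo >> SKETCH_OO_SHIFT);	/* 1 */
	printf("objects = %u\n", oo & SKETCH_OO_MASK);		/* 8192 / 192 = 42 */
	return 0;
}
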
344 static __always_inline void slab_lock(struct page *page) in slab_lock() argument
346 VM_BUG_ON_PAGE(PageTail(page), page); in slab_lock()
347 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
350 static __always_inline void slab_unlock(struct page *page) in slab_unlock() argument
352 VM_BUG_ON_PAGE(PageTail(page), page); in slab_unlock()
353 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
357 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
365 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
366 if (cmpxchg_double(&page->freelist, &page->counters, in __cmpxchg_double_slab()
373 slab_lock(page); in __cmpxchg_double_slab()
374 if (page->freelist == freelist_old && in __cmpxchg_double_slab()
375 page->counters == counters_old) { in __cmpxchg_double_slab()
376 page->freelist = freelist_new; in __cmpxchg_double_slab()
377 page->counters = counters_new; in __cmpxchg_double_slab()
378 slab_unlock(page); in __cmpxchg_double_slab()
381 slab_unlock(page); in __cmpxchg_double_slab()
388 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
394 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
401 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
402 if (cmpxchg_double(&page->freelist, &page->counters, in cmpxchg_double_slab()
412 slab_lock(page); in cmpxchg_double_slab()
413 if (page->freelist == freelist_old && in cmpxchg_double_slab()
414 page->counters == counters_old) { in cmpxchg_double_slab()
415 page->freelist = freelist_new; in cmpxchg_double_slab()
416 page->counters = counters_new; in cmpxchg_double_slab()
417 slab_unlock(page); in cmpxchg_double_slab()
421 slab_unlock(page); in cmpxchg_double_slab()
429 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
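
/*
 * Illustration (not part of slub.c): both __cmpxchg_double_slab() and
 * cmpxchg_double_slab() above replace the (freelist, counters) pair
 * only if both words still hold the expected old values; when the
 * hardware cmpxchg_double is unavailable the same semantics are
 * emulated under slab_lock().  User-space sketch of that
 * "compare both, swap both or neither" fallback, with a plain mutex
 * standing in for slab_lock(); all sketch_* names are made up.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_slab {
	void *freelist;
	unsigned long counters;
	pthread_mutex_t lock;
};

static bool sketch_cmpxchg_double(struct sketch_slab *slab,
				  void *freelist_old, unsigned long counters_old,
				  void *freelist_new, unsigned long counters_new)
{
	bool ok = false;

	pthread_mutex_lock(&slab->lock);
	if (slab->freelist == freelist_old && slab->counters == counters_old) {
		slab->freelist = freelist_new;	/* both words change together... */
		slab->counters = counters_new;
		ok = true;
	}
	pthread_mutex_unlock(&slab->lock);	/* ...or neither does */
	return ok;
}

int main(void)
{
	struct sketch_slab s = { .freelist = NULL, .counters = 3 };
	int obj;

	pthread_mutex_init(&s.lock, NULL);

	/* succeeds: both expected values match */
	printf("%d\n", sketch_cmpxchg_double(&s, NULL, 3, &obj, 2));	/* 1 */
	/* fails: counters is no longer 3, so nothing is written */
	printf("%d\n", sketch_cmpxchg_double(&s, &obj, 3, NULL, 1));	/* 0 */
	return 0;
}
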
440 * Determine a map of objects in use on a page.
442 * Node listlock must be held to guarantee that the page does
445 static unsigned long *get_map(struct kmem_cache *s, struct page *page) in get_map() argument
449 void *addr = page_address(page); in get_map()
455 bitmap_zero(object_map, page->objects); in get_map()
457 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
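
/*
 * Illustration (not part of slub.c): get_map() above clears a bitmap
 * sized to page->objects and then marks every object reachable from
 * page->freelist, where an object's index is simply
 * (object address - page base) / object size.  User-space sketch with
 * a byte array as the "bitmap" and memcpy() standing in for the stored
 * free pointer; all names and sizes are made up.
 */
#include <stdio.h>
#include <string.h>

#define SKETCH_OBJ_SIZE 64
#define SKETCH_OBJECTS  8

int main(void)
{
	unsigned char page[SKETCH_OBJ_SIZE * SKETCH_OBJECTS];
	unsigned char free_map[SKETCH_OBJECTS] = { 0 };	/* 1 = free */
	void *freelist = NULL;

	/* build a free list holding objects 1, 4 and 6 */
	int free_idx[] = { 1, 4, 6 };
	for (int i = 0; i < 3; i++) {
		void *obj = page + free_idx[i] * SKETCH_OBJ_SIZE;
		memcpy(obj, &freelist, sizeof(void *));	/* store "next" in the object */
		freelist = obj;
	}

	/* the get_map() walk: index every object reachable from the free list */
	for (void *p = freelist; p; ) {
		unsigned int idx = ((unsigned char *)p - page) / SKETCH_OBJ_SIZE;
		free_map[idx] = 1;
		memcpy(&p, p, sizeof(void *));		/* follow the stored "next" */
	}

	for (int i = 0; i < SKETCH_OBJECTS; i++)
		printf("object %d: %s\n", i, free_map[i] ? "free" : "in use");
	return 0;
}
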
471 if (s->flags & SLAB_RED_ZONE) in size_from_object()
472 return s->size - s->red_left_pad; in size_from_object()
474 return s->size; in size_from_object()
479 if (s->flags & SLAB_RED_ZONE) in restore_red_left()
480 p -= s->red_left_pad; in restore_red_left()
517 /* Verify that a pointer has an address that is valid within a slab page */
519 struct page *page, void *object) in check_valid_pointer() argument
526 base = page_address(page); in check_valid_pointer()
529 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
530 (object - base) % s->size) { in check_valid_pointer()
551 return s->offset >= s->inuse; in freeptr_outside_object()
561 return s->inuse + sizeof(void *); in get_info_end()
563 return s->inuse; in get_info_end()
586 nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3); in set_track()
590 p->addrs[nr_entries] = 0; in set_track()
592 p->addr = addr; in set_track()
593 p->cpu = smp_processor_id(); in set_track()
594 p->pid = current->pid; in set_track()
595 p->when = jiffies; in set_track()
603 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
612 if (!t->addr) in print_track()
616 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); in print_track()
621 if (t->addrs[i]) in print_track()
622 pr_err("\t%pS\n", (void *)t->addrs[i]); in print_track()
632 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
639 static void print_page_info(struct page *page) in print_page_info() argument
642 page, page->objects, page->inuse, page->freelist, page->flags); in print_page_info()
655 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
656 pr_err("-----------------------------------------------------------------------------\n\n"); in slab_bug()
670 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
674 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, in freelist_corrupted() argument
677 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && in freelist_corrupted()
678 !check_valid_pointer(s, page, nextfree) && freelist) { in freelist_corrupted()
679 object_err(s, page, *freelist, "Freechain corrupt"); in freelist_corrupted()
688 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
691 u8 *addr = page_address(page); in print_trailer()
695 print_page_info(page); in print_trailer()
698 p, p - addr, get_freepointer(s, p)); in print_trailer()
700 if (s->flags & SLAB_RED_ZONE) in print_trailer()
701 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, in print_trailer()
702 s->red_left_pad); in print_trailer()
704 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); in print_trailer()
707 min_t(unsigned int, s->object_size, PAGE_SIZE)); in print_trailer()
708 if (s->flags & SLAB_RED_ZONE) in print_trailer()
709 print_section(KERN_ERR, "Redzone ", p + s->object_size, in print_trailer()
710 s->inuse - s->object_size); in print_trailer()
714 if (s->flags & SLAB_STORE_USER) in print_trailer()
722 size_from_object(s) - off); in print_trailer()
727 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
731 print_trailer(s, page, object); in object_err()
734 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
744 print_page_info(page); in slab_err()
752 if (s->flags & SLAB_RED_ZONE) in init_object()
753 memset(p - s->red_left_pad, val, s->red_left_pad); in init_object()
755 if (s->flags & __OBJECT_POISON) { in init_object()
756 memset(p, POISON_FREE, s->object_size - 1); in init_object()
757 p[s->object_size - 1] = POISON_END; in init_object()
760 if (s->flags & SLAB_RED_ZONE) in init_object()
761 memset(p + s->object_size, val, s->inuse - s->object_size); in init_object()
767 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); in restore_bytes()
768 memset(from, data, to - from); in restore_bytes()
771 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
777 u8 *addr = page_address(page); in check_bytes_and_report()
786 while (end > fault && end[-1] == value) in check_bytes_and_report()
787 end--; in check_bytes_and_report()
790 pr_err("INFO: 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", in check_bytes_and_report()
791 fault, end - 1, fault - addr, in check_bytes_and_report()
793 print_trailer(s, page, object); in check_bytes_and_report()
810 * object + s->object_size
818 * object + s->inuse
829 * object + s->size
830 * Nothing is used beyond s->size.
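
/*
 * Illustration (not part of slub.c): a concrete version of the layout
 * that the comment above describes and that init_object() fills in for
 * a cache with SLAB_RED_ZONE and SLAB_POISON.  Example numbers
 * (object_size = 24, inuse = 32, red_left_pad = 8) are made up but
 * consistent with word alignment on 64-bit; the byte values 0x6b, 0xa5
 * and 0xbb mirror the usual POISON_FREE, POISON_END and
 * SLUB_RED_INACTIVE constants and are hard-coded here only for
 * illustration.
 */
#include <stdio.h>
#include <string.h>

#define SK_POISON_FREE 0x6b
#define SK_POISON_END  0xa5
#define SK_RED_BYTE    0xbb

#define SK_OBJECT_SIZE 24
#define SK_INUSE       32
#define SK_RED_LEFT     8

int main(void)
{
	/* left redzone | poisoned object | right redzone up to inuse */
	unsigned char slot[SK_RED_LEFT + SK_INUSE];
	unsigned char *p = slot + SK_RED_LEFT;		/* what the caller sees */

	memset(p - SK_RED_LEFT, SK_RED_BYTE, SK_RED_LEFT);	/* left redzone */
	memset(p, SK_POISON_FREE, SK_OBJECT_SIZE - 1);		/* poisoned payload */
	p[SK_OBJECT_SIZE - 1] = SK_POISON_END;			/* last payload byte */
	memset(p + SK_OBJECT_SIZE, SK_RED_BYTE,			/* right redzone */
	       SK_INUSE - SK_OBJECT_SIZE);

	for (size_t i = 0; i < sizeof(slot); i++)
		printf("%02x%s", slot[i], (i + 1) % 8 ? " " : "\n");
	return 0;
}
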
837 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
841 if (s->flags & SLAB_STORE_USER) in check_pad_bytes()
850 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
851 p + off, POISON_INUSE, size_from_object(s) - off); in check_pad_bytes()
854 /* Check the pad bytes at the end of a slab page */
855 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
864 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
867 start = page_address(page); in slab_pad_check()
868 length = page_size(page); in slab_pad_check()
870 remainder = length % s->size; in slab_pad_check()
874 pad = end - remainder; in slab_pad_check()
880 while (end > fault && end[-1] == POISON_INUSE) in slab_pad_check()
881 end--; in slab_pad_check()
883 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu", in slab_pad_check()
884 fault, end - 1, fault - start); in slab_pad_check()
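
/*
 * Illustration (not part of slub.c): the padding that slab_pad_check()
 * above verifies is simply what is left over after the last whole
 * object in the page, i.e. page_size % s->size bytes that must still
 * hold POISON_INUSE.  Worked numbers assuming one 4096-byte page and
 * 96-byte objects; names are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_bytes  = 4096;
	unsigned int object_size = 96;

	unsigned int objects   = page_bytes / object_size;	/* 42 */
	unsigned int remainder = page_bytes % object_size;	/* 64 pad bytes */

	printf("%u objects use %u bytes, %u bytes of POISON_INUSE padding\n",
	       objects, objects * object_size, remainder);
	return 0;
}
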
891 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
895 u8 *endobject = object + s->object_size; in check_object()
897 if (s->flags & SLAB_RED_ZONE) { in check_object()
898 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
899 object - s->red_left_pad, val, s->red_left_pad)) in check_object()
902 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
903 endobject, val, s->inuse - s->object_size)) in check_object()
906 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
907 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
909 s->inuse - s->object_size); in check_object()
913 if (s->flags & SLAB_POISON) { in check_object()
914 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
915 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
916 POISON_FREE, s->object_size - 1) || in check_object()
917 !check_bytes_and_report(s, page, p, "Poison", in check_object()
918 p + s->object_size - 1, POISON_END, 1))) in check_object()
923 check_pad_bytes(s, page, p); in check_object()
934 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
935 object_err(s, page, p, "Freepointer corrupt"); in check_object()
947 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
953 if (!PageSlab(page)) { in check_slab()
954 slab_err(s, page, "Not a valid slab page"); in check_slab()
958 maxobj = order_objects(compound_order(page), s->size); in check_slab()
959 if (page->objects > maxobj) { in check_slab()
960 slab_err(s, page, "objects %u > max %u", in check_slab()
961 page->objects, maxobj); in check_slab()
964 if (page->inuse > page->objects) { in check_slab()
965 slab_err(s, page, "inuse %u > max %u", in check_slab()
966 page->inuse, page->objects); in check_slab()
970 slab_pad_check(s, page); in check_slab()
975 * Determine if a certain object on a page is on the freelist. Must hold the
978 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
985 fp = page->freelist; in on_freelist()
986 while (fp && nr <= page->objects) { in on_freelist()
989 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
991 object_err(s, page, object, in on_freelist()
995 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
996 page->freelist = NULL; in on_freelist()
997 page->inuse = page->objects; in on_freelist()
1008 max_objects = order_objects(compound_order(page), s->size); in on_freelist()
1012 if (page->objects != max_objects) { in on_freelist()
1013 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", in on_freelist()
1014 page->objects, max_objects); in on_freelist()
1015 page->objects = max_objects; in on_freelist()
1018 if (page->inuse != page->objects - nr) { in on_freelist()
1019 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
1020 page->inuse, page->objects - nr); in on_freelist()
1021 page->inuse = page->objects - nr; in on_freelist()
1027 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
1030 if (s->flags & SLAB_TRACE) { in trace()
1032 s->name, in trace()
1034 object, page->inuse, in trace()
1035 page->freelist); in trace()
1039 s->object_size); in trace()
1049 struct kmem_cache_node *n, struct page *page) in add_full() argument
1051 if (!(s->flags & SLAB_STORE_USER)) in add_full()
1054 lockdep_assert_held(&n->list_lock); in add_full()
1055 list_add(&page->slab_list, &n->full); in add_full()
1058 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
1060 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
1063 lockdep_assert_held(&n->list_lock); in remove_full()
1064 list_del(&page->slab_list); in remove_full()
1072 return atomic_long_read(&n->nr_slabs); in slabs_node()
1077 return atomic_long_read(&n->nr_slabs); in node_nr_slabs()
1086 * kmem_cache_node structure. Solve the chicken-egg in inc_slabs_node()
1091 atomic_long_inc(&n->nr_slabs); in inc_slabs_node()
1092 atomic_long_add(objects, &n->total_objects); in inc_slabs_node()
1099 atomic_long_dec(&n->nr_slabs); in dec_slabs_node()
1100 atomic_long_sub(objects, &n->total_objects); in dec_slabs_node()
1104 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1115 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) in setup_page_debug() argument
1121 memset(addr, POISON_INUSE, page_size(page)); in setup_page_debug()
1126 struct page *page, void *object) in alloc_consistency_checks() argument
1128 if (!check_slab(s, page)) in alloc_consistency_checks()
1131 if (!check_valid_pointer(s, page, object)) { in alloc_consistency_checks()
1132 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1136 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1143 struct page *page, in alloc_debug_processing() argument
1146 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in alloc_debug_processing()
1147 if (!alloc_consistency_checks(s, page, object)) in alloc_debug_processing()
1152 if (s->flags & SLAB_STORE_USER) in alloc_debug_processing()
1154 trace(s, page, object, 1); in alloc_debug_processing()
1159 if (PageSlab(page)) { in alloc_debug_processing()
1161 * If this is a slab page then let's do the best we can in alloc_debug_processing()
1166 page->inuse = page->objects; in alloc_debug_processing()
1167 page->freelist = NULL; in alloc_debug_processing()
1173 struct page *page, void *object, unsigned long addr) in free_consistency_checks() argument
1175 if (!check_valid_pointer(s, page, object)) { in free_consistency_checks()
1176 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1180 if (on_freelist(s, page, object)) { in free_consistency_checks()
1181 object_err(s, page, object, "Object already free"); in free_consistency_checks()
1185 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1188 if (unlikely(s != page->slab_cache)) { in free_consistency_checks()
1189 if (!PageSlab(page)) { in free_consistency_checks()
1190 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1192 } else if (!page->slab_cache) { in free_consistency_checks()
1197 object_err(s, page, object, in free_consistency_checks()
1198 "page slab pointer corrupt."); in free_consistency_checks()
1206 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1210 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1216 spin_lock_irqsave(&n->list_lock, flags); in free_debug_processing()
1217 slab_lock(page); in free_debug_processing()
1219 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1220 if (!check_slab(s, page)) in free_debug_processing()
1227 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1228 if (!free_consistency_checks(s, page, object, addr)) in free_debug_processing()
1232 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
1234 trace(s, page, object, 0); in free_debug_processing()
1247 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", in free_debug_processing()
1250 slab_unlock(page); in free_debug_processing()
1251 spin_unlock_irqrestore(&n->list_lock, flags); in free_debug_processing()
1263 * @init: assume this is initial parsing and not per-kmem-create parsing
1289 case '-': in parse_slub_debug_flags()
1389 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); in setup_slub_debug()
1396 * kmem_cache_flags - apply debugging options to the cache
1397 * @object_size: the size of an object without meta data
1404 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1430 end = next_block - 1; in kmem_cache_flags()
1432 glob = strnchr(iter, end - iter, '*'); in kmem_cache_flags()
1434 cmplen = glob - iter; in kmem_cache_flags()
1436 cmplen = max_t(size_t, len, (end - iter)); in kmem_cache_flags()
1453 struct page *page, void *object) {} in setup_object_debug() argument
1455 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {} in setup_page_debug() argument
1458 struct page *page, void *object, unsigned long addr) { return 0; } in alloc_debug_processing() argument
1461 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1465 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1467 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1470 struct page *page) {} in add_full() argument
1472 struct page *page) {} in remove_full() argument
1492 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, in freelist_corrupted() argument
1503 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) in kmalloc_large_node_hook() argument
1505 ptr = kasan_kmalloc_large(ptr, size, flags); in kmalloc_large_node_hook()
1507 kmemleak_alloc(ptr, size, 1, flags); in kmalloc_large_node_hook()
1519 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1531 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
1535 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1536 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
1538 /* Use KCSAN to help debug racy use-after-free. */ in slab_free_hook()
1539 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) in slab_free_hook()
1540 __kcsan_check_access(x, s->object_size, in slab_free_hook()
1569 memset(object, 0, s->object_size); in slab_free_freelist_hook()
1570 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad in slab_free_freelist_hook()
1572 memset((char *)object + s->inuse, 0, in slab_free_freelist_hook()
1573 s->size - s->inuse - rsize); in slab_free_freelist_hook()
1592 static void *setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1595 setup_object_debug(s, page, object); in setup_object()
1597 if (unlikely(s->ctor)) { in setup_object()
1599 s->ctor(object); in setup_object()
1608 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page()
1611 struct page *page; in alloc_slab_page() local
1615 page = alloc_pages(flags, order); in alloc_slab_page()
1617 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
1619 if (page) in alloc_slab_page()
1620 account_slab_page(page, order, s); in alloc_slab_page()
1622 return page; in alloc_slab_page()
1626 /* Pre-initialize the random sequence cache */
1629 unsigned int count = oo_objects(s->oo); in init_cache_random_seq()
1633 if (s->random_seq) in init_cache_random_seq()
1639 s->name); in init_cache_random_seq()
1644 if (s->random_seq) { in init_cache_random_seq()
1648 s->random_seq[i] *= s->size; in init_cache_random_seq()
1666 /* Get the next entry from the pre-computed randomized freelist */
1667 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, in next_freelist_entry() argument
1675 * If the target page allocation failed, the number of objects on the in next_freelist_entry()
1676 * page might be smaller than the usual size defined by the cache. in next_freelist_entry()
1679 idx = s->random_seq[*pos]; in next_freelist_entry()
1688 /* Shuffle the singly linked freelist based on a random pre-computed sequence */
1689 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
1696 if (page->objects < 2 || !s->random_seq) in shuffle_freelist()
1699 freelist_count = oo_objects(s->oo); in shuffle_freelist()
1702 page_limit = page->objects * s->size; in shuffle_freelist()
1703 start = fixup_red_left(s, page_address(page)); in shuffle_freelist()
1706 cur = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1708 cur = setup_object(s, page, cur); in shuffle_freelist()
1709 page->freelist = cur; in shuffle_freelist()
1711 for (idx = 1; idx < page->objects; idx++) { in shuffle_freelist()
1712 next = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1714 next = setup_object(s, page, next); in shuffle_freelist()
1728 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
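
/*
 * Illustration (not part of slub.c): with CONFIG_SLAB_FREELIST_RANDOM,
 * init_cache_random_seq() stores a permutation of object indices
 * pre-multiplied by s->size, and next_freelist_entry() walks that
 * sequence to pick object offsets, wrapping the position and skipping
 * offsets beyond the slab when a smaller-than-usual slab was allocated.
 * User-space sketch with a fixed permutation standing in for the random
 * sequence; all names and numbers are made up.
 */
#include <stdio.h>

#define SK_SIZE 64		/* object size */
#define SK_FULL  8		/* objects in a full-size slab */

int main(void)
{
	/* "random" permutation of indices 0..7, pre-scaled like
	 * init_cache_random_seq() does */
	unsigned int seq[SK_FULL] = { 5, 2, 7, 0, 3, 6, 1, 4 };
	for (int i = 0; i < SK_FULL; i++)
		seq[i] *= SK_SIZE;

	/* pretend the page allocation fell back to a smaller slab that
	 * only holds 5 objects: offsets beyond the limit are skipped */
	unsigned int page_limit = 5 * SK_SIZE;
	unsigned int pos = 0;

	printf("freelist order (byte offsets into the slab):\n");
	for (int picked = 0; picked < 5; ) {
		unsigned int offset = seq[pos];

		pos = (pos + 1) % SK_FULL;	/* wrap like next_freelist_entry() */
		if (offset >= page_limit)
			continue;		/* object does not exist on this slab */
		printf("  %u\n", offset);
		picked++;
	}
	return 0;
}
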
1734 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab()
1736 struct page *page; in allocate_slab() local
1737 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
1748 flags |= s->allocflags; in allocate_slab()
1751 * Let the initial higher-order allocation fail under memory pressure in allocate_slab()
1752 * so we fall back to the minimum order allocation. in allocate_slab()
1755 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) in allocate_slab()
1758 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1759 if (unlikely(!page)) { in allocate_slab()
1760 oo = s->min; in allocate_slab()
1766 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1767 if (unlikely(!page)) in allocate_slab()
1772 page->objects = oo_objects(oo); in allocate_slab()
1774 page->slab_cache = s; in allocate_slab()
1775 __SetPageSlab(page); in allocate_slab()
1776 if (page_is_pfmemalloc(page)) in allocate_slab()
1777 SetPageSlabPfmemalloc(page); in allocate_slab()
1779 kasan_poison_slab(page); in allocate_slab()
1781 start = page_address(page); in allocate_slab()
1783 setup_page_debug(s, page, start); in allocate_slab()
1785 shuffle = shuffle_freelist(s, page); in allocate_slab()
1789 start = setup_object(s, page, start); in allocate_slab()
1790 page->freelist = start; in allocate_slab()
1791 for (idx = 0, p = start; idx < page->objects - 1; idx++) { in allocate_slab()
1792 next = p + s->size; in allocate_slab()
1793 next = setup_object(s, page, next); in allocate_slab()
1800 page->inuse = page->objects; in allocate_slab()
1801 page->frozen = 1; in allocate_slab()
1806 if (!page) in allocate_slab()
1809 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
1811 return page; in allocate_slab()
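
/*
 * Illustration (not part of slub.c): when the freelist is not shuffled,
 * the loop in allocate_slab() above links the objects in address order,
 * each one holding a pointer to the object s->size bytes further on and
 * the last one holding NULL.  User-space sketch; names and sizes are
 * made up and the free pointer is simply stored at offset 0 of each
 * object.
 */
#include <stdio.h>
#include <string.h>

#define SK_SIZE    64
#define SK_OBJECTS  4

int main(void)
{
	unsigned char slab[SK_SIZE * SK_OBJECTS];
	unsigned char *p = slab;		/* freelist head = first object */

	for (int idx = 0; idx < SK_OBJECTS - 1; idx++) {
		unsigned char *next = p + SK_SIZE;

		memcpy(p, &next, sizeof(next));	/* like set_freepointer(s, p, next) */
		p = next;
	}
	memset(p, 0, sizeof(void *));		/* last object: next = NULL */

	/* walk the chain we just built */
	for (unsigned char *q = slab; q; ) {
		printf("object at offset %td\n", q - slab);
		memcpy(&q, q, sizeof(q));
	}
	return 0;
}
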
1814 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab()
1823 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1825 int order = compound_order(page); in __free_slab()
1831 slab_pad_check(s, page); in __free_slab()
1832 for_each_object(p, s, page_address(page), in __free_slab()
1833 page->objects) in __free_slab()
1834 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1837 __ClearPageSlabPfmemalloc(page); in __free_slab()
1838 __ClearPageSlab(page); in __free_slab()
1840 page->mapping = NULL; in __free_slab()
1841 if (current->reclaim_state) in __free_slab()
1842 current->reclaim_state->reclaimed_slab += pages; in __free_slab()
1843 unaccount_slab_page(page, order, s); in __free_slab()
1844 __free_pages(page, order); in __free_slab()
1849 struct page *page = container_of(h, struct page, rcu_head); in rcu_free_slab() local
1851 __free_slab(page->slab_cache, page); in rcu_free_slab()
1854 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1856 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { in free_slab()
1857 call_rcu(&page->rcu_head, rcu_free_slab); in free_slab()
1859 __free_slab(s, page); in free_slab()
1862 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1864 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1865 free_slab(s, page); in discard_slab()
1872 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) in __add_partial() argument
1874 n->nr_partial++; in __add_partial()
1876 list_add_tail(&page->slab_list, &n->partial); in __add_partial()
1878 list_add(&page->slab_list, &n->partial); in __add_partial()
1882 struct page *page, int tail) in add_partial() argument
1884 lockdep_assert_held(&n->list_lock); in add_partial()
1885 __add_partial(n, page, tail); in add_partial()
1889 struct page *page) in remove_partial() argument
1891 lockdep_assert_held(&n->list_lock); in remove_partial()
1892 list_del(&page->slab_list); in remove_partial()
1893 n->nr_partial--; in remove_partial()
1903 struct kmem_cache_node *n, struct page *page, in acquire_slab() argument
1908 struct page new; in acquire_slab()
1910 lockdep_assert_held(&n->list_lock); in acquire_slab()
1917 freelist = page->freelist; in acquire_slab()
1918 counters = page->counters; in acquire_slab()
1920 *objects = new.objects - new.inuse; in acquire_slab()
1922 new.inuse = page->objects; in acquire_slab()
1931 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
1937 remove_partial(n, page); in acquire_slab()
1942 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1943 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1951 struct page *page, *page2; in get_partial_node() local
1962 if (!n || !n->nr_partial) in get_partial_node()
1965 spin_lock(&n->list_lock); in get_partial_node()
1966 list_for_each_entry_safe(page, page2, &n->partial, slab_list) { in get_partial_node()
1969 if (!pfmemalloc_match(page, flags)) in get_partial_node()
1972 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1978 c->page = page; in get_partial_node()
1982 put_cpu_partial(s, page, 0); in get_partial_node()
1990 spin_unlock(&n->list_lock); in get_partial_node()
1995 * Get a page from somewhere. Search in increasing NUMA distances.
2026 if (!s->remote_node_defrag_ratio || in get_any_partial()
2027 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
2039 n->nr_partial > s->min_partial) { in get_any_partial()
2044 * here - if mems_allowed was updated in in get_any_partial()
2059 * Get a partial page, lock it and return it.
2118 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
2120 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
2124 pr_warn("due to cpu change %d -> %d\n", in note_cmpxchg_failure()
2129 pr_warn("due to cpu running other code. Event %ld->%ld\n", in note_cmpxchg_failure()
2143 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
2149 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
2153 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
2158 struct page new; in deactivate_slab()
2159 struct page old; in deactivate_slab()
2161 if (page->freelist) { in deactivate_slab()
2168 * to the page freelist while it is still frozen. Leave the in deactivate_slab()
2171 * There is no need to take the list->lock because the page in deactivate_slab()
2183 if (freelist_corrupted(s, page, &freelist, nextfree)) in deactivate_slab()
2187 prior = page->freelist; in deactivate_slab()
2188 counters = page->counters; in deactivate_slab()
2191 new.inuse--; in deactivate_slab()
2194 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2203 * Stage two: Ensure that the page is unfrozen while the in deactivate_slab()
2208 * with the count. If there is a mismatch then the page in deactivate_slab()
2209 * is not unfrozen but the page is on the wrong list. in deactivate_slab()
2212 * the page from the list that we just put it on again in deactivate_slab()
2218 old.freelist = page->freelist; in deactivate_slab()
2219 old.counters = page->counters; in deactivate_slab()
2225 new.inuse--; in deactivate_slab()
2233 if (!new.inuse && n->nr_partial >= s->min_partial) in deactivate_slab()
2241 * that acquire_slab() will see a slab page that in deactivate_slab()
2244 spin_lock(&n->list_lock); in deactivate_slab()
2249 if ((s->flags & SLAB_STORE_USER) && !lock) { in deactivate_slab()
2256 spin_lock(&n->list_lock); in deactivate_slab()
2263 remove_partial(n, page); in deactivate_slab()
2265 remove_full(s, n, page); in deactivate_slab()
2268 add_partial(n, page, tail); in deactivate_slab()
2270 add_full(s, n, page); in deactivate_slab()
2274 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2281 spin_unlock(&n->list_lock); in deactivate_slab()
2289 discard_slab(s, page); in deactivate_slab()
2293 c->page = NULL; in deactivate_slab()
2294 c->freelist = NULL; in deactivate_slab()
2309 struct page *page, *discard_page = NULL; in unfreeze_partials() local
2311 while ((page = slub_percpu_partial(c))) { in unfreeze_partials()
2312 struct page new; in unfreeze_partials()
2313 struct page old; in unfreeze_partials()
2315 slub_set_percpu_partial(c, page); in unfreeze_partials()
2317 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
2320 spin_unlock(&n->list_lock); in unfreeze_partials()
2323 spin_lock(&n->list_lock); in unfreeze_partials()
2328 old.freelist = page->freelist; in unfreeze_partials()
2329 old.counters = page->counters; in unfreeze_partials()
2337 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2342 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in unfreeze_partials()
2343 page->next = discard_page; in unfreeze_partials()
2344 discard_page = page; in unfreeze_partials()
2346 add_partial(n, page, DEACTIVATE_TO_TAIL); in unfreeze_partials()
2352 spin_unlock(&n->list_lock); in unfreeze_partials()
2355 page = discard_page; in unfreeze_partials()
2356 discard_page = discard_page->next; in unfreeze_partials()
2359 discard_slab(s, page); in unfreeze_partials()
2366 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2367 * partial page slot if available.
2372 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2375 struct page *oldpage; in put_cpu_partial()
2383 oldpage = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
2386 pobjects = oldpage->pobjects; in put_cpu_partial()
2387 pages = oldpage->pages; in put_cpu_partial()
2395 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2405 pobjects += page->objects - page->inuse; in put_cpu_partial()
2407 page->pages = pages; in put_cpu_partial()
2408 page->pobjects = pobjects; in put_cpu_partial()
2409 page->next = oldpage; in put_cpu_partial()
2411 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
2417 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2427 deactivate_slab(s, c->page, c->freelist, c); in flush_slab()
2429 c->tid = next_tid(c->tid); in flush_slab()
2439 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2441 if (c->page) in __flush_cpu_slab()
2457 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
2459 return c->page || slub_percpu_partial(c); in has_cpu_slab()
2490 static inline int node_match(struct page *page, int node) in node_match() argument
2493 if (node != NUMA_NO_NODE && page_to_nid(page) != node) in node_match()
2500 static int count_free(struct page *page) in count_free() argument
2502 return page->objects - page->inuse; in count_free()
2507 return atomic_long_read(&n->total_objects); in node_nr_objs()
2513 int (*get_count)(struct page *)) in count_partial() argument
2517 struct page *page; in count_partial() local
2519 spin_lock_irqsave(&n->list_lock, flags); in count_partial()
2520 list_for_each_entry(page, &n->partial, slab_list) in count_partial()
2521 x += get_count(page); in count_partial()
2522 spin_unlock_irqrestore(&n->list_lock, flags); in count_partial()
2541 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", in slab_out_of_memory()
2542 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
2543 oo_order(s->min)); in slab_out_of_memory()
2545 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
2547 s->name); in slab_out_of_memory()
2569 struct page *page; in new_slab_objects() local
2571 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); in new_slab_objects()
2578 page = new_slab(s, flags, node); in new_slab_objects()
2579 if (page) { in new_slab_objects()
2580 c = raw_cpu_ptr(s->cpu_slab); in new_slab_objects()
2581 if (c->page) in new_slab_objects()
2585 * No other reference to the page yet so we can in new_slab_objects()
2588 freelist = page->freelist; in new_slab_objects()
2589 page->freelist = NULL; in new_slab_objects()
2592 c->page = page; in new_slab_objects()
2599 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) in pfmemalloc_match() argument
2601 if (unlikely(PageSlabPfmemalloc(page))) in pfmemalloc_match()
2608 * Check the page->freelist of a page and either transfer the freelist to the
2609 * per cpu freelist or deactivate the page.
2611 * The page is still frozen if the return value is not NULL.
2613 * If this function returns NULL then the page has been unfrozen.
2617 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2619 struct page new; in get_freelist()
2624 freelist = page->freelist; in get_freelist()
2625 counters = page->counters; in get_freelist()
2630 new.inuse = page->objects; in get_freelist()
2633 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
2655 * a call to the page allocator and the setup of a new slab.
2664 struct page *page; in ___slab_alloc() local
2668 page = c->page; in ___slab_alloc()
2669 if (!page) { in ___slab_alloc()
2681 if (unlikely(!node_match(page, node))) { in ___slab_alloc()
2691 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2697 * By rights, we should be searching for a slab page that was in ___slab_alloc()
2699 * information when the page leaves the per-cpu allocator in ___slab_alloc()
2701 if (unlikely(!pfmemalloc_match(page, gfpflags))) { in ___slab_alloc()
2702 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2706 /* must check again c->freelist in case of cpu migration or IRQ */ in ___slab_alloc()
2707 freelist = c->freelist; in ___slab_alloc()
2711 freelist = get_freelist(s, page); in ___slab_alloc()
2714 c->page = NULL; in ___slab_alloc()
2724 * page is pointing to the page from which the objects are obtained. in ___slab_alloc()
2725 * That page must be frozen for per cpu allocations to work. in ___slab_alloc()
2727 VM_BUG_ON(!c->page->frozen); in ___slab_alloc()
2728 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
2729 c->tid = next_tid(c->tid); in ___slab_alloc()
2735 page = c->page = slub_percpu_partial(c); in ___slab_alloc()
2736 slub_set_percpu_partial(c, page); in ___slab_alloc()
2748 page = c->page; in ___slab_alloc()
2749 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in ___slab_alloc()
2754 !alloc_debug_processing(s, page, freelist, addr)) in ___slab_alloc()
2757 deactivate_slab(s, page, get_freepointer(s, freelist), c); in ___slab_alloc()
2778 c = this_cpu_ptr(s->cpu_slab); in __slab_alloc()
2794 memset((void *)((char *)obj + s->offset), 0, sizeof(void *)); in maybe_wipe_obj_freeptr()
2812 struct page *page; in slab_alloc_node() local
2831 tid = this_cpu_read(s->cpu_slab->tid); in slab_alloc_node()
2832 c = raw_cpu_ptr(s->cpu_slab); in slab_alloc_node()
2834 unlikely(tid != READ_ONCE(c->tid))); in slab_alloc_node()
2839 * on c to guarantee that object and page associated with previous tid in slab_alloc_node()
2841 * page could be one associated with next tid and our alloc/free in slab_alloc_node()
2853 object = c->freelist; in slab_alloc_node()
2854 page = c->page; in slab_alloc_node()
2855 if (unlikely(!object || !page || !node_match(page, node))) { in slab_alloc_node()
2875 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_alloc_node()
2889 memset(object, 0, s->object_size); in slab_alloc_node()
2906 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, in kmem_cache_alloc()
2907 s->size, gfpflags); in kmem_cache_alloc()
2914 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) in kmem_cache_alloc_trace() argument
2917 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); in kmem_cache_alloc_trace()
2918 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_trace()
2930 s->object_size, s->size, gfpflags, node); in kmem_cache_alloc_node()
2939 int node, size_t size) in kmem_cache_alloc_node_trace() argument
2944 size, s->size, gfpflags, node); in kmem_cache_alloc_node_trace()
2946 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_node_trace()
2958 * lock and free the item. If there is no additional partial page
2961 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
2968 struct page new; in __slab_free()
2976 !free_debug_processing(s, page, head, tail, cnt, addr)) in __slab_free()
2981 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
2984 prior = page->freelist; in __slab_free()
2985 counters = page->counters; in __slab_free()
2989 new.inuse -= cnt; in __slab_free()
3004 n = get_node(s, page_to_nid(page)); in __slab_free()
3013 spin_lock_irqsave(&n->list_lock, flags); in __slab_free()
3018 } while (!cmpxchg_double_slab(s, page, in __slab_free()
3033 * If we just froze the page then put it onto the in __slab_free()
3036 put_cpu_partial(s, page, 1); in __slab_free()
3043 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
3051 remove_full(s, n, page); in __slab_free()
3052 add_partial(n, page, DEACTIVATE_TO_TAIL); in __slab_free()
3055 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
3063 remove_partial(n, page); in __slab_free()
3067 remove_full(s, n, page); in __slab_free()
3070 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
3072 discard_slab(s, page); in __slab_free()
3087 * same page) possible by specifying head and tail ptr, plus objects
3091 struct page *page, void *head, void *tail, in do_slab_free() argument
3107 tid = this_cpu_read(s->cpu_slab->tid); in do_slab_free()
3108 c = raw_cpu_ptr(s->cpu_slab); in do_slab_free()
3110 unlikely(tid != READ_ONCE(c->tid))); in do_slab_free()
3115 if (likely(page == c->page)) { in do_slab_free()
3116 void **freelist = READ_ONCE(c->freelist); in do_slab_free()
3121 s->cpu_slab->freelist, s->cpu_slab->tid, in do_slab_free()
3130 __slab_free(s, page, head, tail_obj, cnt, addr); in do_slab_free()
3134 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, in slab_free() argument
3143 do_slab_free(s, page, head, tail, cnt, addr); in slab_free()
3164 struct page *page; member
3174 * page. It builds a detached freelist directly within the given
3175 * page/objects. This can happen without any need for
3184 int build_detached_freelist(struct kmem_cache *s, size_t size, in build_detached_freelist() argument
3190 struct page *page; in build_detached_freelist() local
3192 /* Always re-init detached_freelist */ in build_detached_freelist()
3193 df->page = NULL; in build_detached_freelist()
3196 object = p[--size]; in build_detached_freelist()
3198 } while (!object && size); in build_detached_freelist()
3203 page = virt_to_head_page(object); in build_detached_freelist()
3206 if (unlikely(!PageSlab(page))) { in build_detached_freelist()
3207 BUG_ON(!PageCompound(page)); in build_detached_freelist()
3209 __free_pages(page, compound_order(page)); in build_detached_freelist()
3210 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3211 return size; in build_detached_freelist()
3214 df->s = page->slab_cache; in build_detached_freelist()
3216 df->s = cache_from_obj(s, object); /* Support for memcg */ in build_detached_freelist()
3220 df->page = page; in build_detached_freelist()
3221 set_freepointer(df->s, object, NULL); in build_detached_freelist()
3222 df->tail = object; in build_detached_freelist()
3223 df->freelist = object; in build_detached_freelist()
3224 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3225 df->cnt = 1; in build_detached_freelist()
3227 while (size) { in build_detached_freelist()
3228 object = p[--size]; in build_detached_freelist()
3232 /* df->page is always set at this point */ in build_detached_freelist()
3233 if (df->page == virt_to_head_page(object)) { in build_detached_freelist()
3235 set_freepointer(df->s, object, df->freelist); in build_detached_freelist()
3236 df->freelist = object; in build_detached_freelist()
3237 df->cnt++; in build_detached_freelist()
3238 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3244 if (!--lookahead) in build_detached_freelist()
3248 first_skipped_index = size + 1; in build_detached_freelist()
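
/*
 * Illustration (not part of slub.c): build_detached_freelist() above
 * groups the objects in p[] that live in the same page into one local
 * freelist, NULLing their slots so later passes skip them, and stops
 * scanning ahead after a small lookahead budget.  User-space sketch of
 * just the grouping and resume logic; a "page" is simulated by masking
 * the pointer's low bits and every sketch_* name is made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SK_PAGE_MASK (~(uintptr_t)0xfff)	/* pretend 4 KiB pages */

/* Detach the trailing group of objects that share one page.  Returns the
 * index the caller should rescan from next time, 0 when p[] is drained. */
static size_t sketch_detach_one_page(size_t size, void **p, size_t *grouped)
{
	size_t lookahead = 3, first_skipped = 0;
	uintptr_t page;
	void *object;

	*grouped = 0;

	/* find the last slot that has not been processed yet */
	do {
		object = p[--size];
	} while (!object && size);
	if (!object)
		return 0;

	page = (uintptr_t)object & SK_PAGE_MASK;
	p[size] = NULL;				/* mark object processed */
	*grouped = 1;

	while (size) {
		object = p[--size];
		if (!object)
			continue;		/* already freed in an earlier group */
		if (((uintptr_t)object & SK_PAGE_MASK) == page) {
			p[size] = NULL;		/* same page: join the group */
			(*grouped)++;
			continue;
		}
		if (!first_skipped)
			first_skipped = size + 1;	/* resume here next pass */
		if (!--lookahead)
			break;			/* stop mixing pages too deeply */
	}
	return first_skipped;
}

int main(void)
{
	void *objs[] = {
		(void *)(uintptr_t)0x1040, (void *)(uintptr_t)0x2040,
		(void *)(uintptr_t)0x1080, (void *)(uintptr_t)0x20c0,
		(void *)(uintptr_t)0x2100,
	};
	size_t size = 5, grouped;

	while (size) {
		size = sketch_detach_one_page(size, objs, &grouped);
		if (grouped)
			printf("one pass freed %zu object(s) from the same page\n",
			       grouped);
	}
	return 0;
}
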
3255 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
3257 if (WARN_ON(!size)) in kmem_cache_free_bulk()
3260 memcg_slab_free_hook(s, p, size); in kmem_cache_free_bulk()
3264 size = build_detached_freelist(s, size, p, &df); in kmem_cache_free_bulk()
3265 if (!df.page) in kmem_cache_free_bulk()
3268 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); in kmem_cache_free_bulk()
3269 } while (likely(size)); in kmem_cache_free_bulk()
3274 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
3282 s = slab_pre_alloc_hook(s, &objcg, size, flags); in kmem_cache_alloc_bulk()
3291 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3293 for (i = 0; i < size; i++) { in kmem_cache_alloc_bulk()
3294 void *object = c->freelist; in kmem_cache_alloc_bulk()
3298 * We may have removed an object from c->freelist using in kmem_cache_alloc_bulk()
3300 * c->tid has not been bumped yet. in kmem_cache_alloc_bulk()
3302 * allocating memory, we should bump c->tid now. in kmem_cache_alloc_bulk()
3304 c->tid = next_tid(c->tid); in kmem_cache_alloc_bulk()
3307 * Invoking the slow path likely has the side-effect in kmem_cache_alloc_bulk()
3308 * of re-populating per CPU c->freelist in kmem_cache_alloc_bulk()
3315 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3318 continue; /* goto for-loop */ in kmem_cache_alloc_bulk()
3320 c->freelist = get_freepointer(s, object); in kmem_cache_alloc_bulk()
3324 c->tid = next_tid(c->tid); in kmem_cache_alloc_bulk()
3332 memset(p[j], 0, s->object_size); in kmem_cache_alloc_bulk()
3336 slab_post_alloc_hook(s, objcg, flags, size, p); in kmem_cache_alloc_bulk()
3349 * offset 0. If we tune the size of the object to the alignment then we can
3371 * Calculate the order of allocation given a slab object size.
3375 * order 0 does not cause fragmentation in the page allocator. Larger objects
3387 * we try to keep the page order as low as possible. So we accept more waste
3388 * of space in favor of a small page order.
3395 static inline unsigned int slab_order(unsigned int size, in slab_order() argument
3402 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) in slab_order()
3403 return get_order(size * MAX_OBJS_PER_PAGE) - 1; in slab_order()
3405 for (order = max(min_order, (unsigned int)get_order(min_objects * size)); in slab_order()
3411 rem = slab_size % size; in slab_order()
3420 static inline int calculate_order(unsigned int size) in calculate_order() argument
3437 max_objects = order_objects(slub_max_order, size); in calculate_order()
3445 order = slab_order(size, min_objects, in calculate_order()
3451 min_objects--; in calculate_order()
3458 order = slab_order(size, 1, slub_max_order, 1); in calculate_order()
3465 order = slab_order(size, 1, MAX_ORDER, 1); in calculate_order()
3468 return -ENOSYS; in calculate_order()
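
/*
 * Illustration (not part of slub.c): the heart of slab_order() above is
 * a waste check: pick the smallest order whose leftover bytes
 * (slab_size % size) are at most slab_size / fract_leftover, and
 * calculate_order() retries with looser constraints if nothing fits.
 * User-space sketch of just the waste check, with worked numbers on
 * 4096-byte pages; names are made up.
 */
#include <stdio.h>

static unsigned int sketch_slab_order(unsigned int size,
				      unsigned int max_order,
				      unsigned int fract_leftover)
{
	unsigned int order;

	for (order = 0; order <= max_order; order++) {
		unsigned int slab_size = 4096u << order;
		unsigned int rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;			/* waste is acceptable */
	}
	return order;
}

int main(void)
{
	/* 700-byte objects: order 0 leaves 596 of 4096 bytes unused
	 * (more than 4096/16 = 256), order 1 leaves only 492 of 8192
	 * (under 8192/16 = 512), so the loop stops at order 1 */
	printf("700-byte objects, 1/16 waste cap -> order %u\n",
	       sketch_slab_order(700, 3, 16));
	/* 512-byte objects divide a page exactly -> order 0 */
	printf("512-byte objects, 1/16 waste cap -> order %u\n",
	       sketch_slab_order(512, 3, 16));
	return 0;
}
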
3474 n->nr_partial = 0; in init_kmem_cache_node()
3475 spin_lock_init(&n->list_lock); in init_kmem_cache_node()
3476 INIT_LIST_HEAD(&n->partial); in init_kmem_cache_node()
3478 atomic_long_set(&n->nr_slabs, 0); in init_kmem_cache_node()
3479 atomic_long_set(&n->total_objects, 0); in init_kmem_cache_node()
3480 INIT_LIST_HEAD(&n->full); in init_kmem_cache_node()
3493 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
3496 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
3517 struct page *page; in early_kmem_cache_node_alloc() local
3520 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); in early_kmem_cache_node_alloc()
3522 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
3524 BUG_ON(!page); in early_kmem_cache_node_alloc()
3525 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
3530 n = page->freelist; in early_kmem_cache_node_alloc()
3538 page->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
3539 page->inuse = 1; in early_kmem_cache_node_alloc()
3540 page->frozen = 0; in early_kmem_cache_node_alloc()
3541 kmem_cache_node->node[node] = n; in early_kmem_cache_node_alloc()
3543 inc_slabs_node(kmem_cache_node, node, page->objects); in early_kmem_cache_node_alloc()
3549 __add_partial(n, page, DEACTIVATE_TO_HEAD); in early_kmem_cache_node_alloc()
3558 s->node[node] = NULL; in free_kmem_cache_nodes()
3566 free_percpu(s->cpu_slab); in __kmem_cache_release()
3590 s->node[node] = n; in init_kmem_cache_nodes()
3601 s->min_partial = min; in set_min_partial()
3626 else if (s->size >= PAGE_SIZE) in set_cpu_partial()
3628 else if (s->size >= 1024) in set_cpu_partial()
3630 else if (s->size >= 256) in set_cpu_partial()
3643 slab_flags_t flags = s->flags; in calculate_sizes()
3644 unsigned int size = s->object_size; in calculate_sizes() local
3649 * Round up object size to the next word boundary. We can only in calculate_sizes()
3653 size = ALIGN(size, sizeof(void *)); in calculate_sizes()
3656 * safely written. If redzoning adds more to the inuse size, we in calculate_sizes()
3658 * s->offset must be limited within this for the general case. in calculate_sizes()
3660 freepointer_area = size; in calculate_sizes()
3669 !s->ctor) in calculate_sizes()
3670 s->flags |= __OBJECT_POISON; in calculate_sizes()
3672 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
3680 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
3681 size += sizeof(void *); in calculate_sizes()
3688 s->inuse = size; in calculate_sizes()
3691 s->ctor)) { in calculate_sizes()
3700 * The assumption that s->offset >= s->inuse means free in calculate_sizes()
3705 s->offset = size; in calculate_sizes()
3706 size += sizeof(void *); in calculate_sizes()
3713 s->offset = ALIGN(freepointer_area / 2, sizeof(void *)); in calculate_sizes()
3722 size += 2 * sizeof(struct track); in calculate_sizes()
3725 kasan_cache_create(s, &size, &s->flags); in calculate_sizes()
3735 size += sizeof(void *); in calculate_sizes()
3737 s->red_left_pad = sizeof(void *); in calculate_sizes()
3738 s->red_left_pad = ALIGN(s->red_left_pad, s->align); in calculate_sizes()
3739 size += s->red_left_pad; in calculate_sizes()
3745 * offset 0. In order to align the objects we have to simply size in calculate_sizes()
3748 size = ALIGN(size, s->align); in calculate_sizes()
3749 s->size = size; in calculate_sizes()
3750 s->reciprocal_size = reciprocal_value(size); in calculate_sizes()
3754 order = calculate_order(size); in calculate_sizes()
3759 s->allocflags = 0; in calculate_sizes()
3761 s->allocflags |= __GFP_COMP; in calculate_sizes()
3763 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
3764 s->allocflags |= GFP_DMA; in calculate_sizes()
3766 if (s->flags & SLAB_CACHE_DMA32) in calculate_sizes()
3767 s->allocflags |= GFP_DMA32; in calculate_sizes()
3769 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
3770 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
3775 s->oo = oo_make(order, size); in calculate_sizes()
3776 s->min = oo_make(get_order(size), size); in calculate_sizes()
3777 if (oo_objects(s->oo) > oo_objects(s->max)) in calculate_sizes()
3778 s->max = s->oo; in calculate_sizes()
3780 return !!oo_objects(s->oo); in calculate_sizes()
3785 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); in kmem_cache_open()
3787 s->random = get_random_long(); in kmem_cache_open()
3790 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3797 if (get_order(s->size) > get_order(s->object_size)) { in kmem_cache_open()
3798 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
3799 s->offset = 0; in kmem_cache_open()
3800 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3807 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) in kmem_cache_open()
3809 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
3813 * The larger the object size is, the more pages we want on the partial in kmem_cache_open()
3814 * list to avoid pounding the page allocator excessively. in kmem_cache_open()
3816 set_min_partial(s, ilog2(s->size) / 2); in kmem_cache_open()
3821 s->remote_node_defrag_ratio = 1000; in kmem_cache_open()
3824 /* Initialize the pre-computed randomized freelist if slab is up */ in kmem_cache_open()
3838 return -EINVAL; in kmem_cache_open()
3841 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3845 void *addr = page_address(page); in list_slab_objects()
3849 slab_err(s, page, text, s->name); in list_slab_objects()
3850 slab_lock(page); in list_slab_objects()
3852 map = get_map(s, page); in list_slab_objects()
3853 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3856 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr); in list_slab_objects()
3861 slab_unlock(page); in list_slab_objects()
3873 struct page *page, *h; in free_partial() local
3876 spin_lock_irq(&n->list_lock); in free_partial()
3877 list_for_each_entry_safe(page, h, &n->partial, slab_list) { in free_partial()
3878 if (!page->inuse) { in free_partial()
3879 remove_partial(n, page); in free_partial()
3880 list_add(&page->slab_list, &discard); in free_partial()
3882 list_slab_objects(s, page, in free_partial()
3886 spin_unlock_irq(&n->list_lock); in free_partial()
3888 list_for_each_entry_safe(page, h, &discard, slab_list) in free_partial()
3889 discard_slab(s, page); in free_partial()
3898 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_empty()
3915 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_shutdown()
3937 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); in setup_slub_max_order()
3953 void *__kmalloc(size_t size, gfp_t flags) in __kmalloc() argument
3958 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) in __kmalloc()
3959 return kmalloc_large(size, flags); in __kmalloc()
3961 s = kmalloc_slab(size, flags); in __kmalloc()
3968 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); in __kmalloc()
3970 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc()
3977 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) in kmalloc_large_node() argument
3979 struct page *page; in kmalloc_large_node() local
3981 unsigned int order = get_order(size); in kmalloc_large_node()
3984 page = alloc_pages_node(node, flags, order); in kmalloc_large_node()
3985 if (page) { in kmalloc_large_node()
3986 ptr = page_address(page); in kmalloc_large_node()
3987 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, in kmalloc_large_node()
3991 return kmalloc_large_node_hook(ptr, size, flags); in kmalloc_large_node()
3994 void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node() argument
3999 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { in __kmalloc_node()
4000 ret = kmalloc_large_node(size, flags, node); in __kmalloc_node()
4003 size, PAGE_SIZE << get_order(size), in __kmalloc_node()
4009 s = kmalloc_slab(size, flags); in __kmalloc_node()
4016 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
4018 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc_node()
4034 void __check_heap_object(const void *ptr, unsigned long n, struct page *page, in __check_heap_object() argument
4043 /* Find object and usable object size. */ in __check_heap_object()
4044 s = page->slab_cache; in __check_heap_object()
4047 if (ptr < page_address(page)) in __check_heap_object()
4048 usercopy_abort("SLUB object not in SLUB page?!", NULL, in __check_heap_object()
4052 offset = (ptr - page_address(page)) % s->size; in __check_heap_object()
4056 if (offset < s->red_left_pad) in __check_heap_object()
4058 s->name, to_user, offset, n); in __check_heap_object()
4059 offset -= s->red_left_pad; in __check_heap_object()
4063 if (offset >= s->useroffset && in __check_heap_object()
4064 offset - s->useroffset <= s->usersize && in __check_heap_object()
4065 n <= s->useroffset - offset + s->usersize) in __check_heap_object()
4076 offset <= object_size && n <= object_size - offset) { in __check_heap_object()
4077 usercopy_warn("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
4081 usercopy_abort("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
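
/*
 * Illustration (not part of slub.c): __check_heap_object() above
 * reduces a pointer to an offset inside its object,
 * (ptr - page base) % s->size (minus the left redzone when debugging),
 * and then requires the copied range to fit inside the cache's
 * whitelisted [useroffset, useroffset + usersize) window.  User-space
 * sketch of that bounds test with made-up cache geometry; all names
 * are made up.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_cache {
	unsigned int size;		/* slot size, including metadata */
	unsigned int useroffset;	/* start of whitelisted region */
	unsigned int usersize;		/* length of whitelisted region */
};

static bool sketch_usercopy_ok(const struct sketch_cache *s,
			       unsigned long page_base, unsigned long ptr,
			       unsigned long n)
{
	unsigned long offset = (ptr - page_base) % s->size;

	return offset >= s->useroffset &&
	       offset - s->useroffset <= s->usersize &&
	       n <= s->useroffset - offset + s->usersize;
}

int main(void)
{
	/* 256-byte slots whitelisting bytes 32..95 of each object */
	struct sketch_cache c = { .size = 256, .useroffset = 32, .usersize = 64 };
	unsigned long base = 0x10000;

	/* copy of 16 bytes starting at byte 40 of object 3: allowed */
	printf("%d\n", sketch_usercopy_ok(&c, base, base + 3 * 256 + 40, 16)); /* 1 */
	/* copy of 64 bytes starting at byte 40: would run past byte 95 */
	printf("%d\n", sketch_usercopy_ok(&c, base, base + 3 * 256 + 40, 64)); /* 0 */
	return 0;
}
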
4087 struct page *page; in __ksize() local
4092 page = virt_to_head_page(object); in __ksize()
4094 if (unlikely(!PageSlab(page))) { in __ksize()
4095 WARN_ON(!PageCompound(page)); in __ksize()
4096 return page_size(page); in __ksize()
4099 return slab_ksize(page->slab_cache); in __ksize()
4105 struct page *page; in kfree() local
4113 page = virt_to_head_page(x); in kfree()
4114 if (unlikely(!PageSlab(page))) { in kfree()
4115 unsigned int order = compound_order(page); in kfree()
4117 BUG_ON(!PageCompound(page)); in kfree()
4119 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, in kfree()
4120 -(PAGE_SIZE << order)); in kfree()
4121 __free_pages(page, order); in kfree()
4124 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); in kfree()
4144 struct page *page; in __kmem_cache_shrink() local
4145 struct page *t; in __kmem_cache_shrink()
4157 spin_lock_irqsave(&n->list_lock, flags); in __kmem_cache_shrink()
4163 * list_lock. page->inuse here is the upper limit. in __kmem_cache_shrink()
4165 list_for_each_entry_safe(page, t, &n->partial, slab_list) { in __kmem_cache_shrink()
4166 int free = page->objects - page->inuse; in __kmem_cache_shrink()
4168 /* Do not reread page->inuse */ in __kmem_cache_shrink()
4174 if (free == page->objects) { in __kmem_cache_shrink()
4175 list_move(&page->slab_list, &discard); in __kmem_cache_shrink()
4176 n->nr_partial--; in __kmem_cache_shrink()
4178 list_move(&page->slab_list, promote + free - 1); in __kmem_cache_shrink()
4185 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) in __kmem_cache_shrink()
4186 list_splice(promote + i, &n->partial); in __kmem_cache_shrink()
4188 spin_unlock_irqrestore(&n->list_lock, flags); in __kmem_cache_shrink()
4191 list_for_each_entry_safe(page, t, &discard, slab_list) in __kmem_cache_shrink()
4192 discard_slab(s, page); in __kmem_cache_shrink()
4220 offline_node = marg->status_change_nid_normal; in slab_mem_offline_callback()
4234 * if n->nr_slabs > 0, slabs still exist on the node in slab_mem_offline_callback()
4241 s->node[offline_node] = NULL; in slab_mem_offline_callback()
4253 int nid = marg->status_change_nid_normal; in slab_mem_going_online_callback()
4277 ret = -ENOMEM; in slab_mem_going_online_callback()
4281 s->node[nid] = n; in slab_mem_going_online_callback()
4326 * the page allocator. Allocate them properly then fix up the pointers
4336 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
4345 struct page *p; in bootstrap()
4347 list_for_each_entry(p, &n->partial, slab_list) in bootstrap()
4348 p->slab_cache = s; in bootstrap()
4351 list_for_each_entry(p, &n->full, slab_list) in bootstrap()
4352 p->slab_cache = s; in bootstrap()
4355 list_add(&s->list, &slab_caches); in bootstrap()
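/*
 * Editorial note (not kernel source): bootstrap() migrates the statically
 * allocated boot caches into properly allocated kmem_cache structures once
 * the allocator can serve them.  Because existing slabs still point at the
 * static copy, every page on the per-node partial (and, with debugging,
 * full) lists has its page->slab_cache back-pointer rewritten before the
 * relocated cache is added to slab_caches.
 */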
4396 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", in kmem_cache_init()
4407 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, in __kmem_cache_alias() argument
4412 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
4414 s->refcount++; in __kmem_cache_alias()
4420 s->object_size = max(s->object_size, size); in __kmem_cache_alias()
4421 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
4424 s->refcount--; in __kmem_cache_alias()
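/*
 * Editorial note (not kernel source): cache merging.  When a new cache
 * request is compatible with an existing cache, find_mergeable() returns
 * that cache, its refcount is bumped, and object_size/inuse are widened to
 * cover the larger request so kzalloc()-style clearing stays correct.  If
 * the sysfs alias cannot be created, the refcount is dropped again and the
 * caller falls back to creating a separate cache.
 */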
4451 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) in __kmalloc_track_caller() argument
4456 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) in __kmalloc_track_caller()
4457 return kmalloc_large(size, gfpflags); in __kmalloc_track_caller()
4459 s = kmalloc_slab(size, gfpflags); in __kmalloc_track_caller()
4467 trace_kmalloc(caller, ret, size, s->size, gfpflags); in __kmalloc_track_caller()
4474 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, in __kmalloc_node_track_caller() argument
4480 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { in __kmalloc_node_track_caller()
4481 ret = kmalloc_large_node(size, gfpflags, node); in __kmalloc_node_track_caller()
4484 size, PAGE_SIZE << get_order(size), in __kmalloc_node_track_caller()
4490 s = kmalloc_slab(size, gfpflags); in __kmalloc_node_track_caller()
4498 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); in __kmalloc_node_track_caller()
4506 static int count_inuse(struct page *page) in count_inuse() argument
4508 return page->inuse; in count_inuse()
4511 static int count_total(struct page *page) in count_total() argument
4513 return page->objects; in count_total()
4518 static void validate_slab(struct kmem_cache *s, struct page *page) in validate_slab() argument
4521 void *addr = page_address(page); in validate_slab()
4524 slab_lock(page); in validate_slab()
4526 if (!check_slab(s, page) || !on_freelist(s, page, NULL)) in validate_slab()
4530 map = get_map(s, page); in validate_slab()
4531 for_each_object(p, s, addr, page->objects) { in validate_slab()
4535 if (!check_object(s, page, p, val)) in validate_slab()
4540 slab_unlock(page); in validate_slab()
4547 struct page *page; in validate_slab_node() local
4550 spin_lock_irqsave(&n->list_lock, flags); in validate_slab_node()
4552 list_for_each_entry(page, &n->partial, slab_list) { in validate_slab_node()
4553 validate_slab(s, page); in validate_slab_node()
4556 if (count != n->nr_partial) in validate_slab_node()
4558 s->name, count, n->nr_partial); in validate_slab_node()
4560 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
4563 list_for_each_entry(page, &n->full, slab_list) { in validate_slab_node()
4564 validate_slab(s, page); in validate_slab_node()
4567 if (count != atomic_long_read(&n->nr_slabs)) in validate_slab_node()
4569 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
4572 spin_unlock_irqrestore(&n->list_lock, flags); in validate_slab_node()
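/*
 * Editorial note (not kernel source): validation (triggered through the
 * validate sysfs attribute, see validate_store() below) re-checks every
 * object on each node's partial list under list_lock: the slab itself, its
 * freelist, and each object's red zone/poison state according to whether
 * the object is currently free or allocated.  The full list is only
 * walked, and its slab count only cross-checked, when SLAB_STORE_USER is
 * set, since full slabs are only tracked in that case.
 */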
4613 if (t->max) in free_loc_track()
4614 free_pages((unsigned long)t->loc, in free_loc_track()
4615 get_order(sizeof(struct location) * t->max)); in free_loc_track()
4629 if (t->count) { in alloc_loc_track()
4630 memcpy(l, t->loc, sizeof(struct location) * t->count); in alloc_loc_track()
4633 t->max = max; in alloc_loc_track()
4634 t->loc = l; in alloc_loc_track()
4644 unsigned long age = jiffies - track->when; in add_location()
4646 start = -1; in add_location()
4647 end = t->count; in add_location()
4650 pos = start + (end - start + 1) / 2; in add_location()
4659 caddr = t->loc[pos].addr; in add_location()
4660 if (track->addr == caddr) { in add_location()
4662 l = &t->loc[pos]; in add_location()
4663 l->count++; in add_location()
4664 if (track->when) { in add_location()
4665 l->sum_time += age; in add_location()
4666 if (age < l->min_time) in add_location()
4667 l->min_time = age; in add_location()
4668 if (age > l->max_time) in add_location()
4669 l->max_time = age; in add_location()
4671 if (track->pid < l->min_pid) in add_location()
4672 l->min_pid = track->pid; in add_location()
4673 if (track->pid > l->max_pid) in add_location()
4674 l->max_pid = track->pid; in add_location()
4676 cpumask_set_cpu(track->cpu, in add_location()
4677 to_cpumask(l->cpus)); in add_location()
4679 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
4683 if (track->addr < caddr) in add_location()
4692 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) in add_location()
4695 l = t->loc + pos; in add_location()
4696 if (pos < t->count) in add_location()
4698 (t->count - pos) * sizeof(struct location)); in add_location()
4699 t->count++; in add_location()
4700 l->count = 1; in add_location()
4701 l->addr = track->addr; in add_location()
4702 l->sum_time = age; in add_location()
4703 l->min_time = age; in add_location()
4704 l->max_time = age; in add_location()
4705 l->min_pid = track->pid; in add_location()
4706 l->max_pid = track->pid; in add_location()
4707 cpumask_clear(to_cpumask(l->cpus)); in add_location()
4708 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); in add_location()
4709 nodes_clear(l->nodes); in add_location()
4710 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
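/*
 * Editorial note (not kernel source): the location table behind the
 * alloc_calls/free_calls reports is kept sorted by call-site address, so
 * the loop above is a binary search.  A hit only aggregates statistics
 * (hit count, min/sum/max object age, pid range, cpu and node masks); a
 * miss grows the table by doubling it with GFP_ATOMIC if needed and
 * memmove()s the tail upward to insert the new record in sorted order.
 */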
4715 struct page *page, enum track_item alloc) in process_slab() argument
4717 void *addr = page_address(page); in process_slab()
4721 map = get_map(s, page); in process_slab()
4722 for_each_object(p, s, addr, page->objects) in process_slab()
4746 struct page *page; in list_locations() local
4748 if (!atomic_long_read(&n->nr_slabs)) in list_locations()
4751 spin_lock_irqsave(&n->list_lock, flags); in list_locations()
4752 list_for_each_entry(page, &n->partial, slab_list) in list_locations()
4753 process_slab(&t, s, page, alloc); in list_locations()
4754 list_for_each_entry(page, &n->full, slab_list) in list_locations()
4755 process_slab(&t, s, page, alloc); in list_locations()
4756 spin_unlock_irqrestore(&n->list_lock, flags); in list_locations()
4762 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) in list_locations()
4764 len += sprintf(buf + len, "%7ld ", l->count); in list_locations()
4766 if (l->addr) in list_locations()
4767 len += sprintf(buf + len, "%pS", (void *)l->addr); in list_locations()
4769 len += sprintf(buf + len, "<not-available>"); in list_locations()
4771 if (l->sum_time != l->min_time) { in list_locations()
4773 l->min_time, in list_locations()
4774 (long)div_u64(l->sum_time, l->count), in list_locations()
4775 l->max_time); in list_locations()
4778 l->min_time); in list_locations()
4780 if (l->min_pid != l->max_pid) in list_locations()
4781 len += sprintf(buf + len, " pid=%ld-%ld", in list_locations()
4782 l->min_pid, l->max_pid); in list_locations()
4785 l->min_pid); in list_locations()
4788 !cpumask_empty(to_cpumask(l->cpus)) && in list_locations()
4789 len < PAGE_SIZE - 60) in list_locations()
4790 len += scnprintf(buf + len, PAGE_SIZE - len - 50, in list_locations()
4792 cpumask_pr_args(to_cpumask(l->cpus))); in list_locations()
4794 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && in list_locations()
4795 len < PAGE_SIZE - 60) in list_locations()
4796 len += scnprintf(buf + len, PAGE_SIZE - len - 50, in list_locations()
4798 nodemask_pr_args(&l->nodes)); in list_locations()
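/*
 * Editorial note (not kernel source): the report is assembled into a
 * single PAGE_SIZE sysfs buffer, which is why every sprintf() above is
 * preceded by a remaining-space check (KSYM_SYMBOL_LEN plus slack for the
 * symbol name, roughly 50-60 bytes for the cpu and node masks).  Once the
 * buffer is nearly full the loop stops and the remaining records are
 * simply not printed.
 */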
4819 pr_err("-----------------------\n"); in resiliency_test()
4824 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n", in resiliency_test()
4832 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> 0x%p\n", in resiliency_test()
4840 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", in resiliency_test()
4849 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); in resiliency_test()
4855 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); in resiliency_test()
4861 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); in resiliency_test()
4911 return -ENOMEM; in show_slab_objects()
4917 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
4920 struct page *page; in show_slab_objects() local
4922 page = READ_ONCE(c->page); in show_slab_objects()
4923 if (!page) in show_slab_objects()
4926 node = page_to_nid(page); in show_slab_objects()
4928 x = page->objects; in show_slab_objects()
4930 x = page->inuse; in show_slab_objects()
4937 page = slub_percpu_partial_read_once(c); in show_slab_objects()
4938 if (page) { in show_slab_objects()
4939 node = page_to_nid(page); in show_slab_objects()
4945 x = page->pages; in show_slab_objects()
4956 * mem_hotplug_lock->slab_mutex->kernfs_mutex in show_slab_objects()
4960 * unplug code doesn't destroy the kmem_cache->node[] data. in show_slab_objects()
4970 x = atomic_long_read(&n->total_objects); in show_slab_objects()
4972 x = atomic_long_read(&n->total_objects) - in show_slab_objects()
4975 x = atomic_long_read(&n->nr_slabs); in show_slab_objects()
4991 x = n->nr_partial; in show_slab_objects()
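/*
 * Editorial note (not kernel source): show_slab_objects() backs sysfs
 * attributes such as objects, objects_partial and slabs.  Per-cpu state
 * (the active c->page and the percpu partial list) is sampled locklessly
 * with READ_ONCE, so the numbers are approximate by design; the per-node
 * figures then come from the kmem_cache_node counters, and the lock
 * ordering comment above explains why the walk of kmem_cache->node[] does
 * not need mem_hotplug_lock.
 */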
5026 return sprintf(buf, "%u\n", s->size); in slab_size_show()
5032 return sprintf(buf, "%u\n", s->align); in align_show()
5038 return sprintf(buf, "%u\n", s->object_size); in object_size_show()
5044 return sprintf(buf, "%u\n", oo_objects(s->oo)); in objs_per_slab_show()
5050 return sprintf(buf, "%u\n", oo_order(s->oo)); in order_show()
5056 return sprintf(buf, "%lu\n", s->min_partial); in min_partial_show()
5089 return -EINVAL; in cpu_partial_store()
5099 if (!s->ctor) in ctor_show()
5101 return sprintf(buf, "%pS\n", s->ctor); in ctor_show()
5107 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
5143 struct page *page; in slabs_cpu_partial_show() local
5145 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5147 if (page) { in slabs_cpu_partial_show()
5148 pages += page->pages; in slabs_cpu_partial_show()
5149 objects += page->pobjects; in slabs_cpu_partial_show()
5157 struct page *page; in slabs_cpu_partial_show() local
5159 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5161 if (page && len < PAGE_SIZE - 20) in slabs_cpu_partial_show()
5163 page->pobjects, page->pages); in slabs_cpu_partial_show()
5172 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
5178 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
5185 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
5192 return sprintf(buf, "%u\n", s->usersize); in usersize_show()
5198 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); in destroy_by_rcu_show()
5217 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); in sanity_checks_show()
5223 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
5229 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
5236 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
5243 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
5256 int ret = -EINVAL; in validate_store()
5269 if (!(s->flags & SLAB_STORE_USER)) in alloc_calls_show()
5270 return -ENOSYS; in alloc_calls_show()
5277 if (!(s->flags & SLAB_STORE_USER)) in free_calls_show()
5278 return -ENOSYS; in free_calls_show()
5287 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
5303 return -EINVAL; in shrink_store()
5311 return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
5324 return -ERANGE; in remote_node_defrag_ratio_store()
5326 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
5342 return -ENOMEM; in show_stat()
5345 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
5355 if (data[cpu] && len < PAGE_SIZE - 20) in show_stat()
5368 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
5380 return -EINVAL; \
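/*
 * Editorial note (not kernel source): with CONFIG_SLUB_STATS each
 * STAT_ATTR() instance expands into a sysfs file (alloc_fastpath,
 * free_slowpath, ...) whose show method sums the per-cpu counter via
 * show_stat() and appends the nonzero per-cpu contributions, and whose
 * store method accepts only "0" and clears the counter on every cpu via
 * clear_stat().
 */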
5502 if (!attribute->show) in slab_attr_show()
5503 return -EIO; in slab_attr_show()
5505 err = attribute->show(s, buf); in slab_attr_show()
5521 if (!attribute->store) in slab_attr_store()
5522 return -EIO; in slab_attr_store()
5524 err = attribute->store(s, buf, len); in slab_attr_store()
5554 * Format :[flags-]size
5571 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
5573 if (s->flags & SLAB_CACHE_DMA32) in create_unique_id()
5575 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
5577 if (s->flags & SLAB_CONSISTENCY_CHECKS) in create_unique_id()
5579 if (s->flags & SLAB_ACCOUNT) in create_unique_id()
5582 *p++ = '-'; in create_unique_id()
5583 p += sprintf(p, "%07u", s->size); in create_unique_id()
5585 BUG_ON(p > name + ID_STR_LENGTH - 1); in create_unique_id()
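/*
 * Editorial note (not kernel source): the unique id built here names the
 * sysfs directory of a merged cache.  It is a ':' followed by one letter
 * per merge-relevant flag and the zero-padded object size, so a
 * SLAB_ACCOUNT cache with s->size == 192 would get an id along the lines
 * of ":A-0000192".
 */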
5597 kobject_init(&s->kobj, &slab_ktype); in sysfs_slab_add()
5611 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
5612 name = s->name; in sysfs_slab_add()
5621 s->kobj.kset = kset; in sysfs_slab_add()
5622 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
5624 kobject_put(&s->kobj); in sysfs_slab_add()
5628 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
5634 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
5641 kobject_del(&s->kobj); in sysfs_slab_add()
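/*
 * Editorial note (not kernel source): an unmergeable cache is registered
 * under its own name (after removing any stale alias symlink of that
 * name), while a mergeable cache is registered under the unique id from
 * create_unique_id() and then gets an alias symlink carrying its original
 * name via sysfs_slab_alias().  Caches created before the slab kset exists
 * only get their kobject initialized here; slab_sysfs_init() later walks
 * slab_caches to register them and drains alias_list for aliases requested
 * early.
 */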
5648 kobject_del(&s->kobj); in sysfs_slab_unlink()
5654 kobject_put(&s->kobj); in sysfs_slab_release()
5677 sysfs_remove_link(&slab_kset->kobj, name); in sysfs_slab_alias()
5678 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
5683 return -ENOMEM; in sysfs_slab_alias()
5685 al->s = s; in sysfs_slab_alias()
5686 al->name = name; in sysfs_slab_alias()
5687 al->next = alias_list; in sysfs_slab_alias()
5703 return -ENOSYS; in slab_sysfs_init()
5712 s->name); in slab_sysfs_init()
5718 alias_list = alias_list->next; in slab_sysfs_init()
5719 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
5722 al->name); in slab_sysfs_init()
5752 sinfo->active_objs = nr_objs - nr_free; in get_slabinfo()
5753 sinfo->num_objs = nr_objs; in get_slabinfo()
5754 sinfo->active_slabs = nr_slabs; in get_slabinfo()
5755 sinfo->num_slabs = nr_slabs; in get_slabinfo()
5756 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
5757 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()
5767 return -EIO; in slabinfo_write()
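/*
 * Editorial note (not kernel source): /proc/slabinfo support.  The figures
 * are summed over the per-node counters: active_objs is the total object
 * count minus the free objects found on the partial lists, while
 * objects_per_slab and cache_order are decoded from the precomputed
 * order/objects word s->oo.  slabinfo_write() returns -EIO because SLUB
 * exposes its tunables under /sys/kernel/slab/ instead of accepting the
 * SLAB-style tuning lines written to /proc/slabinfo.
 */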