Lines matching refs: objects
417 void *objects[]; member
789 return sizeof(struct slabobj_ext) * slab->objects; in obj_exts_size_in_slab()
797 objext_offset = s->size * slab->objects; in obj_exts_offset_in_slab()
914 bitmap_zero(obj_map, slab->objects); in __fill_map()
999 if (object < base || object >= base + slab->objects * s->size || in check_valid_pointer()
1110 slab, slab->objects, slab->inuse, slab->freelist, in print_slab_info()
1543 if (slab->objects > maxobj) { in check_slab()
1545 slab->objects, maxobj); in check_slab()
1548 if (slab->inuse > slab->objects) { in check_slab()
1550 slab->inuse, slab->objects); in check_slab()
1575 while (fp && nr <= slab->objects) { in on_freelist()
1587 slab->inuse = slab->objects; in on_freelist()
1597 if (nr > slab->objects) { in on_freelist()
1600 slab->inuse = slab->objects; in on_freelist()
1609 if (slab->objects != max_objects) { in on_freelist()
1611 slab->objects, max_objects); in on_freelist()
1612 slab->objects = max_objects; in on_freelist()
1615 if (slab->inuse != slab->objects - nr) { in on_freelist()
1617 slab->inuse, slab->objects - nr); in on_freelist()
1618 slab->inuse = slab->objects - nr; in on_freelist()
1669 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1674 atomic_long_add(objects, &n->total_objects); in inc_slabs_node()
1676 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1681 atomic_long_sub(objects, &n->total_objects); in dec_slabs_node()
1742 slab->inuse = slab->objects; in alloc_debug_processing()
2027 int objects) {} in inc_slabs_node() argument
2029 int objects) {} in dec_slabs_node() argument
2076 struct slabobj_ext *vec, unsigned int objects) in handle_failed_objexts_alloc() argument
2086 for (i = 0; i < objects; i++) in handle_failed_objexts_alloc()
2096 struct slabobj_ext *vec, unsigned int objects) {} in handle_failed_objexts_alloc() argument
2119 size_t sz = sizeof(struct slabobj_ext) * slab->objects; in obj_exts_alloc_size()
2145 unsigned int objects = objs_per_slab(s, slab); in alloc_slab_obj_exts() local
2191 handle_failed_objexts_alloc(old_exts, vec, objects); in alloc_slab_obj_exts()
2294 for_each_object(addr, s, slab_address(slab), slab->objects) in alloc_slab_obj_exts_early()
2397 int objects) in __alloc_tagging_slab_free_hook() argument
2411 for (i = 0; i < objects; i++) { in __alloc_tagging_slab_free_hook()
2421 int objects) in alloc_tagging_slab_free_hook() argument
2424 __alloc_tagging_slab_free_hook(s, slab, p, objects); in alloc_tagging_slab_free_hook()
2436 int objects) in alloc_tagging_slab_free_hook() argument
2472 int objects) in memcg_slab_free_hook() argument
2484 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); in memcg_slab_free_hook()
2558 void **p, int objects) in memcg_slab_free_hook() argument
2763 sheaf_size = struct_size(sheaf, objects, capacity); in __alloc_empty_sheaf()
2812 filled = refill_objects(s, &sheaf->objects[sheaf->size], gfp, to_fill, in refill_sheaf()
2866 void *objects[PCS_BATCH_MAX]; in __sheaf_flush_main_batch() local
2877 memcpy(objects, sheaf->objects + sheaf->size, batch * sizeof(void *)); in __sheaf_flush_main_batch()
2883 __kmem_cache_free_bulk(s, batch, &objects[0]); in __sheaf_flush_main_batch()
2935 __kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]); in sheaf_flush_unused()
2944 void **p = &sheaf->objects[0]; in __rcu_free_sheaf_prepare()
3379 if (slab->objects < 2 || !s->random_seq) in shuffle_freelist()
3397 page_limit = slab->objects * s->size; in shuffle_freelist()
3405 for (idx = 1; idx < slab->objects; idx++) { in shuffle_freelist()
3495 slab->objects = oo_objects(oo); in allocate_slab()
3520 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { in allocate_slab()
3582 for_each_object(p, s, slab_address(slab), slab->objects) in free_slab()
3594 dec_slabs_node(s, slab_nid(slab), slab->objects); in discard_slab()
3674 if (slab->inuse == slab->objects) { in alloc_single_from_partial()
3721 if (slab->inuse == slab->objects) in alloc_single_from_new_slab()
3726 inc_slabs_node(s, nid, slab->objects); in alloc_single_from_new_slab()
3769 slab_free = flc.objects - flc.inuse; in get_partial_node_bulk()
4105 return slab->objects - slab->inuse; in count_free()
4200 x += slab->objects - slab->inuse; in count_partial_free_approx()
4210 x += slab->objects - slab->inuse; in count_partial_free_approx()
4215 x += slab->objects - slab->inuse; in count_partial_free_approx()
4294 new.inuse = old.objects; in get_freelist_nofreeze()
4329 needs_add_partial = (slab->objects > count); in alloc_from_new_slab()
4363 inc_slabs_node(s, slab_nid(slab), slab->objects); in alloc_from_new_slab()
4722 object = pcs->main->objects[pcs->main->size - 1]; in alloc_from_pcs()
4809 memcpy(p, main->objects + main->size, batch * sizeof(void *)); in alloc_from_pcs_bulk()
4972 sheaf = kzalloc_flex(*sheaf, objects, size, gfp); in kmem_cache_prefill_sheaf()
4985 &sheaf->objects[0])) { in kmem_cache_prefill_sheaf()
5121 &sheaf->objects[sheaf->size])) { in kmem_cache_refill_sheaf()
5168 ret = sheaf->objects[--sheaf->size]; in kmem_cache_alloc_from_sheaf_noprof()
5451 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); in free_to_partial_list()
5780 pcs->main->objects[pcs->main->size++] = object; in free_to_pcs()
5935 rcu_sheaf->objects[rcu_sheaf->size++] = obj; in __kfree_rcu_sheaf()
6047 memcpy(main->objects + main->size, p, batch * sizeof(void *)); in free_to_pcs_bulk()
6088 struct llist_head objects; member
6095 .objects = LLIST_HEAD_INIT(objects),
6107 struct llist_head *objs = &df->objects; in free_deferred_objects()
6146 if (llist_add(head + s->offset, &df->objects)) in defer_free()
7539 inc_slabs_node(kmem_cache_node, node, slab->objects); in early_kmem_cache_node_alloc()
7645 size = struct_size_t(struct slab_sheaf, objects, capacity); in calculate_sheaf_capacity()
7647 capacity = (size - struct_size_t(struct slab_sheaf, objects, 0)) / sizeof(void *); in calculate_sheaf_capacity()
7829 for_each_object(p, s, addr, slab->objects) { in list_slab_objects()
7931 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size in __kmem_obj_info()
8125 int free = slab->objects - slab->inuse; in __kmem_cache_do_shrink()
8133 if (free == slab->objects) { in __kmem_cache_do_shrink()
8137 dec_slabs_node(s, node, slab->objects); in __kmem_cache_do_shrink()
8536 return slab->objects; in count_total()
8557 for_each_object(p, s, addr, slab->objects) { in validate_slab()
8782 for_each_object(p, s, addr, slab->objects) in process_slab()
8956 unsigned int objects; in cpu_partial_store() local
8959 err = kstrtouint(buf, 10, &objects); in cpu_partial_store()
8962 if (objects) in cpu_partial_store()
9058 SLAB_ATTR_RO(objects);
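
The references above revolve around a few recurring invariants: slab->objects is the total number of object slots in a slab, slab->inuse is how many are allocated (so free slots = objects - inuse, as in count_free() at 4105), a valid object pointer lies inside [base, base + objects * size) (check_valid_pointer() at 999), and a sheaf is a counted flexible array of object pointers pushed and popped LIFO (5780, 5168). The consistency checks in check_slab() and on_freelist() (1543-1621) enforce exactly these relationships. The following stand-alone C sketch illustrates them; the struct layouts and helper names here are simplified, hypothetical stand-ins, not the kernel's actual definitions.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Simplified stand-in for the kernel's struct slab; the real layout differs. */
struct slab {
	unsigned int objects;   /* total object slots in the slab */
	unsigned int inuse;     /* slots currently allocated */
};

/* Simplified stand-in for struct slab_sheaf: a counted, flexible
 * array of object pointers (cf. the "void *objects[];" member at 417). */
struct slab_sheaf {
	unsigned int size;      /* objects currently held */
	void *objects[];        /* flexible array member */
};

/* Free slots, as computed in count_free() (4105) and the
 * count_partial_free_approx() loops (4200-4215). */
static unsigned int count_free(const struct slab *slab)
{
	return slab->objects - slab->inuse;
}

/* Bounds check in the spirit of check_valid_pointer() (999): a valid
 * object pointer lies inside [base, base + objects * object_size). */
static int pointer_in_slab(const struct slab *slab, const char *base,
			   size_t object_size, const void *p)
{
	const char *obj = p;

	return obj >= base && obj < base + slab->objects * object_size;
}

/* LIFO push/pop on a sheaf, mirroring free_to_pcs() (5780) and
 * kmem_cache_alloc_from_sheaf_noprof() (5168). */
static void sheaf_push(struct slab_sheaf *sheaf, void *object)
{
	sheaf->objects[sheaf->size++] = object;
}

static void *sheaf_pop(struct slab_sheaf *sheaf)
{
	return sheaf->objects[--sheaf->size];
}

int main(void)
{
	struct slab slab = { .objects = 16, .inuse = 10 };
	char storage[16 * 64];
	unsigned int capacity = 8;
	/* Flexible-array allocation, analogous to the struct_size()
	 * computations at 2763 and 7645. */
	struct slab_sheaf *sheaf =
		malloc(sizeof(*sheaf) + capacity * sizeof(void *));

	if (!sheaf)
		return 1;
	sheaf->size = 0;

	printf("free slots: %u\n", count_free(&slab));
	printf("pointer valid: %d\n",
	       pointer_in_slab(&slab, storage, 64, storage + 5 * 64));

	sheaf_push(sheaf, storage);
	printf("popped: %p\n", sheaf_pop(sheaf));

	free(sheaf);
	return 0;
}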