Lines Matching +full:page +full:- +full:size

1 /* SPDX-License-Identifier: GPL-2.0 */
21 unsigned int object_size; /* The original size of the object */
22 unsigned int size; /* The aligned/padded/added on size */
26 unsigned int usersize; /* Usercopy region size */
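The three fields above are worth distinguishing: object_size is what the caller asked kmem_cache_create() for, size is the final slot size once alignment and debug metadata have been folded in, and usersize bounds the region that usercopy checks will accept. A minimal userspace sketch of how a requested size grows into a slot size, assuming an 8-byte red zone purely for illustration:

/*
 * Hedged sketch (not kernel code): how a requested object size becomes
 * the final slot size once padding and alignment are added. The ALIGN
 * macro mirrors the kernel's; the 8-byte red zone is an assumption.
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int object_size = 21;	/* what the caller asked for */
	unsigned int align = 8;		/* assumed arch alignment */
	unsigned int red_zone = 8;	/* assumed debug padding */
	unsigned int size = ALIGN(object_size + red_zone, align);

	printf("object_size=%u -> size=%u\n", object_size, size);
	return 0;
}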
44 #include <linux/fault-inject.h>
61 PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */
80 unsigned int size;
88 /* Find the kmalloc slab corresponding for a certain size */
97 struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
101 unsigned int size, slab_flags_t flags,
105 struct kmem_cache *find_mergeable(unsigned size, unsigned align,
109 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
117 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, in __kmem_cache_alias()
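find_mergeable() and __kmem_cache_alias() let a new cache reuse an existing one when their layouts are compatible. A hedged userspace sketch of the kind of predicate involved; the real rules also compare flags and reject caches with constructors, which is elided here:

/*
 * Hedged sketch of a find_mergeable()-style test: an existing cache can
 * back a new request when the slot is large enough, the alignment is
 * compatible, and less than one word of the slot would be wasted. A
 * simplification, not the kernel's exact rules.
 */
#include <stdbool.h>
#include <stdio.h>

struct cache {
	unsigned int size;	/* slot size of the existing cache */
	unsigned int align;	/* its alignment */
};

static bool can_merge(const struct cache *s, unsigned int size,
		      unsigned int align)
{
	if (size > s->size)		/* slot too small for the request */
		return false;
	if (s->align % align)		/* incompatible alignment */
		return false;
	/* don't merge if a word or more of the slot would be wasted */
	return s->size - size < sizeof(void *);
}

int main(void)
{
	struct cache s = { .size = 32, .align = 8 };

	printf("%d %d\n", can_merge(&s, 30, 8), can_merge(&s, 16, 8));
	return 0;
}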
209 return (s->flags & SLAB_RECLAIM_ACCOUNT) ? in cache_vmstat_idx()
236 return s->flags & flags; in kmem_cache_debug_flags()
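The matcher cuts line 209 mid-ternary. In this kernel generation the expression picks between the byte-based reclaimable and unreclaimable per-node slab counters; a self-contained sketch with stand-in constants:

/*
 * Hedged sketch completing the truncated ternary above:
 * SLAB_RECLAIM_ACCOUNT routes the cache's pages to the reclaimable
 * vmstat counter, everything else to the unreclaimable one. The
 * constants here are stand-ins for the kernel's.
 */
#include <stdio.h>

#define SLAB_RECLAIM_ACCOUNT	0x1u
enum node_stat_item { NR_SLAB_RECLAIMABLE_B, NR_SLAB_UNRECLAIMABLE_B };

static enum node_stat_item cache_vmstat_idx(unsigned int flags)
{
	return (flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

int main(void)
{
	printf("%d %d\n", cache_vmstat_idx(SLAB_RECLAIM_ACCOUNT),
	       cache_vmstat_idx(0));
	return 0;
}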
242 static inline struct obj_cgroup **page_obj_cgroups(struct page *page) in page_obj_cgroups()
245 * page->mem_cgroup and page->obj_cgroups are sharing the same in page_obj_cgroups()
247 * that the page is a slab page (e.g. page_cgroup_ino()), let's in page_obj_cgroups()
251 ((unsigned long)page->obj_cgroups & ~0x1UL); in page_obj_cgroups()
254 static inline bool page_has_obj_cgroups(struct page *page) in page_has_obj_cgroups()
256 return ((unsigned long)page->obj_cgroups & 0x1UL); in page_has_obj_cgroups()
259 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
262 static inline void memcg_free_page_obj_cgroups(struct page *page) in memcg_free_page_obj_cgroups()
264 kfree(page_obj_cgroups(page)); in memcg_free_page_obj_cgroups()
265 page->obj_cgroups = NULL; in memcg_free_page_obj_cgroups()
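page->mem_cgroup and page->obj_cgroups share one pointer-sized field, so the obj_cgroups case is marked by setting the pointer's lowest bit: page_has_obj_cgroups() tests the bit and page_obj_cgroups() masks it off with ~0x1UL before dereferencing. A userspace sketch of the same tagging trick (heap allocations are suitably aligned, so the low bit is free):

/*
 * Hedged userspace sketch of the pointer-tagging trick above: store the
 * vector pointer with bit 0 set, test that bit to classify the field,
 * and mask it off before use. Not kernel code.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* stands in for the obj_cgroup vector hung off a slab page */
	void **vec = calloc(4, sizeof(*vec));
	/* store with the low bit set, as page->obj_cgroups does */
	unsigned long word = (unsigned long)vec | 0x1UL;

	assert(word & 0x1UL);			/* page_has_obj_cgroups() */
	void **back = (void **)(word & ~0x1UL);	/* page_obj_cgroups() */

	printf("tagged=%#lx untagged=%p\n", word, (void *)back);
	free(back);
	return 0;
}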
274 return s->size + sizeof(struct obj_cgroup *); in obj_full_size()
289 if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)) in memcg_slab_pre_alloc_hook()
321 gfp_t flags, size_t size, in memcg_slab_post_alloc_hook()
324 struct page *page; in memcg_slab_post_alloc_hook()
332 for (i = 0; i < size; i++) { in memcg_slab_post_alloc_hook()
334 page = virt_to_head_page(p[i]); in memcg_slab_post_alloc_hook()
336 if (!page_has_obj_cgroups(page) && in memcg_slab_post_alloc_hook()
337 memcg_alloc_page_obj_cgroups(page, s, flags)) { in memcg_slab_post_alloc_hook()
342 off = obj_to_index(s, page, p[i]); in memcg_slab_post_alloc_hook()
344 page_obj_cgroups(page)[off] = objcg; in memcg_slab_post_alloc_hook()
345 mod_objcg_state(objcg, page_pgdat(page), in memcg_slab_post_alloc_hook()
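The post-alloc hook above walks every just-allocated object, lazily attaches an obj_cgroup vector to the head page, and records the owning cgroup at the object's slot. obj_to_index() reduces to the object's offset within the page divided by the slot size; the kernel replaces the division with a precomputed reciprocal, while plain division is used in this sketch:

/*
 * Hedged sketch of the obj_to_index() arithmetic: offset of the object
 * inside the (head) page, divided by the slot size. Values illustrative.
 */
#include <stdio.h>

static unsigned int obj_to_index(unsigned long page_addr,
				 unsigned int slot_size, unsigned long obj)
{
	return (obj - page_addr) / slot_size;
}

int main(void)
{
	unsigned long page_addr = 0x10000;	/* assumed head-page base */
	unsigned int size = 256;		/* s->size, illustrative */
	unsigned long obj = page_addr + 3 * size;

	printf("off=%u\n", obj_to_index(page_addr, size, obj));	/* off=3 */
	return 0;
}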
359 struct page *page; in memcg_slab_free_hook()
370 page = virt_to_head_page(p[i]); in memcg_slab_free_hook()
371 if (!page_has_obj_cgroups(page)) in memcg_slab_free_hook()
375 s = page->slab_cache; in memcg_slab_free_hook()
379 off = obj_to_index(s, page, p[i]); in memcg_slab_free_hook()
380 objcg = page_obj_cgroups(page)[off]; in memcg_slab_free_hook()
384 page_obj_cgroups(page)[off] = NULL; in memcg_slab_free_hook()
386 mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s), in memcg_slab_free_hook()
387 -obj_full_size(s)); in memcg_slab_free_hook()
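The free hook mirrors the allocation side: look up the obj_cgroup recorded at the object's index, clear the slot, uncharge obj_full_size() (the slot size plus the tracking pointer, per line 274), and subtract the same amount from the vmstat counter. A hedged userspace sketch with stand-in types:

/*
 * Hedged userspace sketch of the free-hook bookkeeping above: find the
 * object's slot in the objcg vector, clear it, and subtract the charge
 * that the allocation hook added. Types are stand-ins, not the kernel's.
 */
#include <stdio.h>

struct objcg { long charged; };

static void free_hook(struct objcg **vec, unsigned int off,
		      unsigned int obj_full_size)
{
	struct objcg *objcg = vec[off];

	if (!objcg)			/* object was never accounted */
		return;
	vec[off] = NULL;		/* page_obj_cgroups(page)[off] = NULL */
	objcg->charged -= obj_full_size;	/* uncharge + negative stat */
}

int main(void)
{
	struct objcg cg = { .charged = 264 };	/* 256-byte slot + pointer */
	struct objcg *vec[4] = { [2] = &cg };

	free_hook(vec, 2, 264);
	printf("remaining charge: %ld\n", cg.charged);
	return 0;
}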
393 static inline bool page_has_obj_cgroups(struct page *page) in page_has_obj_cgroups()
403 static inline int memcg_alloc_page_obj_cgroups(struct page *page, in memcg_alloc_page_obj_cgroups()
409 static inline void memcg_free_page_obj_cgroups(struct page *page) in memcg_free_page_obj_cgroups()
422 gfp_t flags, size_t size, in memcg_slab_post_alloc_hook()
435 struct page *page; in virt_to_cache()
437 page = virt_to_head_page(obj); in virt_to_cache()
438 if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n", in virt_to_cache()
441 return page->slab_cache; in virt_to_cache()
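virt_to_cache() recovers the owning cache by rounding the object address down to its page and reading page->slab_cache. A sketch of the masking step behind virt_to_head_page(); the compound-page hop to the head page is elided, and the PAGE_SIZE value is illustrative:

/*
 * Hedged sketch of the address rounding behind virt_to_head_page():
 * mask the object address down to its page base. Userspace demo only.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long obj = 0x7f0000123abcUL;	/* some object address */

	printf("page base: %#lx\n", obj & PAGE_MASK);
	return 0;
}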
444 static __always_inline void account_slab_page(struct page *page, int order, in account_slab_page()
447 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), in account_slab_page()
451 static __always_inline void unaccount_slab_page(struct page *page, int order, in unaccount_slab_page()
455 memcg_free_page_obj_cgroups(page); in unaccount_slab_page()
457 mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s), in unaccount_slab_page()
458 -(PAGE_SIZE << order)); in unaccount_slab_page()
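Both helpers account a whole slab at once: an order-N slab spans PAGE_SIZE << N bytes, added to the per-node counter on allocation and subtracted (note the negated delta on line 458) on teardown. A one-loop illustration:

/*
 * Sketch of the (un)account_slab_page() delta above: an order-N page
 * block covers PAGE_SIZE << N bytes. PAGE_SIZE value is illustrative.
 */
#include <stdio.h>

#define PAGE_SIZE 4096L

int main(void)
{
	for (int order = 0; order <= 3; order++)
		printf("order %d -> %ld bytes\n", order, PAGE_SIZE << order);
	return 0;
}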
472 __func__, s->name, cachep->name)) in cache_from_obj()
480 return s->object_size; in slab_ksize()
488 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) in slab_ksize()
489 return s->object_size; in slab_ksize()
491 if (s->flags & SLAB_KASAN) in slab_ksize()
492 return s->object_size; in slab_ksize()
498 if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) in slab_ksize()
499 return s->inuse; in slab_ksize()
503 return s->size; in slab_ksize()
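slab_ksize() reports how many bytes of a slot a caller may actually use, and the order of the checks above encodes a precedence: red zones, poisoning, or KASAN shrink the answer back to object_size; RCU or user tracking caps it at s->inuse; only a plain cache exposes the whole padded slot. A hedged sketch with stand-in flag bits:

/*
 * Hedged sketch of the slab_ksize() precedence above. The flag values
 * are stand-ins for the kernel's, not their real definitions.
 */
#include <stdio.h>

#define F_RED_ZONE	0x1u
#define F_POISON	0x2u
#define F_KASAN		0x4u
#define F_RCU_OR_USER	0x8u

struct cache { unsigned int flags, object_size, inuse, size; };

static unsigned int ksize_of(const struct cache *s)
{
	if (s->flags & (F_RED_ZONE | F_POISON | F_KASAN))
		return s->object_size;	/* metadata lives in the padding */
	if (s->flags & F_RCU_OR_USER)
		return s->inuse;	/* freelist/user info sits past inuse */
	return s->size;			/* the whole padded slot is usable */
}

int main(void)
{
	struct cache s = { .flags = 0, .object_size = 21,
			   .inuse = 24, .size = 32 };

	printf("plain=%u\n", ksize_of(&s));	/* 32 */
	s.flags = F_POISON;
	printf("debug=%u\n", ksize_of(&s));	/* 21 */
	return 0;
}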
509 size_t size, gfp_t flags) in slab_pre_alloc_hook()
521 if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags)) in slab_pre_alloc_hook()
529 gfp_t flags, size_t size, void **p) in slab_post_alloc_hook()
534 for (i = 0; i < size; i++) { in slab_post_alloc_hook()
537 kmemleak_alloc_recursive(p[i], s->object_size, 1, in slab_post_alloc_hook()
538 s->flags, flags); in slab_post_alloc_hook()
541 memcg_slab_post_alloc_hook(s, objcg, flags, size, p); in slab_post_alloc_hook()
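slab_pre_alloc_hook() runs once and may veto the whole (possibly bulk) allocation, for instance when the memcg charge fails; slab_post_alloc_hook() then loops over all size objects to finish kmemleak and memcg bookkeeping. A userspace sketch of that pairing, with stand-in hooks:

/*
 * Hedged sketch of the pre/post hook pairing above. The hooks are
 * stand-in shapes; the kernel versions also take the cache, objcg and
 * gfp flags.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool pre_hook(size_t size)  { return size > 0; }	/* may veto */
static void post_hook(void **p, size_t size)
{
	for (size_t i = 0; i < size; i++)
		(void)p[i];		/* per-object bookkeeping goes here */
}

static int bulk_alloc(void **p, size_t size, size_t obj_size)
{
	if (!pre_hook(size))		/* vetoes the whole allocation */
		return -1;
	for (size_t i = 0; i < size; i++)
		p[i] = malloc(obj_size);
	post_hook(p, size);		/* finish bookkeeping for all objects */
	return 0;
}

int main(void)
{
	void *objs[4];

	if (bulk_alloc(objs, 4, 64) == 0)
		printf("allocated 4 objects\n");
	for (int i = 0; i < 4; i++)
		free(objs[i]);
	return 0;
}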
559 unsigned int colour_next; /* Per-node cache coloring */
580 return s->node[node]; in get_node()
624 if (c->ctor) in slab_want_init_on_alloc()
626 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) in slab_want_init_on_alloc()
636 return !(c->ctor || in slab_want_init_on_free()
637 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))); in slab_want_init_on_free()
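The two predicates above gate init_on_alloc/init_on_free zeroing: caches with a constructor must not have the ctor's state wiped, poisoned caches would lose their poison pattern, and SLAB_TYPESAFE_BY_RCU objects may still be read after free. A sketch of the shared predicate with stand-in flags:

/*
 * Hedged sketch of the slab_want_init_on_free() predicate above. Flag
 * values are stand-ins for the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define F_TYPESAFE_BY_RCU	0x1u
#define F_POISON		0x2u

struct cache {
	void (*ctor)(void *);
	unsigned int flags;
};

static bool want_init(const struct cache *c)
{
	/* ctor state and poison patterns must not be wiped, and RCU
	 * objects may legitimately be read after free */
	return !(c->ctor || (c->flags & (F_TYPESAFE_BY_RCU | F_POISON)));
}

static void noop_ctor(void *p) { (void)p; }

int main(void)
{
	struct cache plain = { 0 }, with_ctor = { .ctor = noop_ctor };

	printf("plain=%d ctor=%d\n", want_init(&plain), want_init(&with_ctor));
	return 0;
}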