Lines Matching full:class in mm/zsmalloc.c
139 * determined). NOTE: all those class sizes must be set as multiple of
188 * Size of objects stored in this class. Must be multiple
248 unsigned int class:CLASS_BITS + 1; member
486 *class_idx = zspage->class; in get_zspage_mapping()
492 return pool->size_class[zspage->class]; in zspage_class()
499 zspage->class = class_idx; in set_zspage_mapping()
505 * class maintains a list of zspages where each zspage is divided
508 * size class which has chunk size big enough to hold the given size.
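
The members referenced throughout this listing outline the shape of the per-class descriptor. Below is a hypothetical reconstruction built only from those references; field order, the lock, and the named constants are assumptions, not the kernel's definition:

struct size_class {
	spinlock_t lock;
	/* one zspage list per ZS_INUSE_RATIO_* fullness group */
	struct list_head fullness_list[NR_FULLNESS_GROUPS];
	int size;			/* object size this class serves */
	int objs_per_zspage;		/* objects that fit in one zspage */
	int pages_per_zspage;		/* 0-order pages backing one zspage */
	unsigned int index;		/* slot in pool->size_class[] */
	struct zs_size_stat stats;	/* backs zs_stat_get(class, type) */
};
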
521 static inline void class_stat_inc(struct size_class *class, in class_stat_inc() argument
524 class->stats.objs[type] += cnt; in class_stat_inc()
527 static inline void class_stat_dec(struct size_class *class, in class_stat_dec() argument
530 class->stats.objs[type] -= cnt; in class_stat_dec()
533 static inline unsigned long zs_stat_get(struct size_class *class, int type) in zs_stat_get() argument
535 return class->stats.objs[type]; in zs_stat_get()
555 static unsigned long zs_can_compact(struct size_class *class);
561 struct size_class *class; in zs_stats_size_show() local
569 "class", "size", "10%", "20%", "30%", "40%", in zs_stats_size_show()
576 class = pool->size_class[i]; in zs_stats_size_show()
578 if (class->index != i) in zs_stats_size_show()
583 seq_printf(s, " %5u %5u ", i, class->size); in zs_stats_size_show()
585 inuse_totals[fg] += zs_stat_get(class, fg); in zs_stats_size_show()
586 seq_printf(s, "%9lu ", zs_stat_get(class, fg)); in zs_stats_size_show()
589 obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED); in zs_stats_size_show()
590 obj_used = zs_stat_get(class, ZS_OBJS_INUSE); in zs_stats_size_show()
591 freeable = zs_can_compact(class); in zs_stats_size_show()
594 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
596 class->pages_per_zspage; in zs_stats_size_show()
600 class->pages_per_zspage, freeable); in zs_stats_size_show()
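
The column header at line 569 is cut off by the single-line match; judging from the per-class fragments that follow, the remaining columns cover the higher in-use ratios and then obj_allocated, obj_used, pages_used, pages_per_zspage and freeable.
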
660 * For each size class, zspages are divided into different groups
664 static int get_fullness_group(struct size_class *class, struct zspage *zspage) in get_fullness_group() argument
669 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
686 * Each size class maintains various freelists and zspages are assigned
689 * identified by <class, fullness_group>.
691 static void insert_zspage(struct size_class *class, in insert_zspage() argument
695 class_stat_inc(class, fullness, 1); in insert_zspage()
696 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
701 * by <class, fullness_group>.
703 static void remove_zspage(struct size_class *class, in remove_zspage() argument
707 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
710 class_stat_dec(class, fullness, 1); in remove_zspage()
714 * Each size class maintains zspages in different fullness groups depending
722 static int fix_fullness_group(struct size_class *class, struct zspage *zspage) in fix_fullness_group() argument
728 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
732 remove_zspage(class, zspage, currfg); in fix_fullness_group()
733 insert_zspage(class, zspage, newfg); in fix_fullness_group()
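
The grouping these helpers maintain reduces to an in-use ratio per zspage. A minimal sketch of get_fullness_group(), assuming the ZS_INUSE_RATIO_* groups are consecutive enum values 10% apart (the 0% and 100% names appear verbatim elsewhere in this listing; the rounding is an assumption):

static int get_fullness_group(struct size_class *class, struct zspage *zspage)
{
	int inuse = get_zspage_inuse(zspage);
	int objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		return ZS_INUSE_RATIO_0;
	if (inuse == objs_per_zspage)
		return ZS_INUSE_RATIO_100;

	/* e.g. 3 of 8 objects in use -> 37% -> the 30-40% bucket */
	return (100 * inuse / objs_per_zspage) / 10 + 1;
}
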
848 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
875 class_stat_dec(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in __free_zspage()
876 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); in __free_zspage()
879 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
895 remove_zspage(class, zspage, ZS_INUSE_RATIO_0); in free_zspage()
896 __free_zspage(pool, class, zspage); in free_zspage()
900 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
916 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
918 link += class->size / sizeof(*link); in init_zspage()
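
The two init_zspage() fragments imply that free-space tracking is threaded through the free objects themselves: each free slot's first word names the next free slot. A sketch of that idea for a single page; struct link_free, the plain-index encoding, and the terminator are assumptions:

struct link_free {
	unsigned long next;	/* index of the next free object */
};

static void thread_page_freelist(struct size_class *class, void *vaddr)
{
	struct link_free *link = vaddr;
	unsigned long off = 0, freeobj = 1;

	/* link every object that starts on this page; the kernel also
	 * chains objects that straddle into the next page, which this
	 * single-page sketch omits */
	while ((off += class->size) < PAGE_SIZE) {
		link->next = freeobj++;
		link += class->size / sizeof(*link);
	}
	link->next = (unsigned long)-1;	/* end of this page's list */
}
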
944 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
950 int nr_pages = class->pages_per_zspage; in create_page_chain()
967 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
968 class->pages_per_zspage == 1)) in create_page_chain()
978 * Allocate a zspage for the given size class
981 struct size_class *class, in alloc_zspage() argument
994 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1011 create_page_chain(class, zspage, pages); in alloc_zspage()
1012 init_zspage(class, zspage); in alloc_zspage()
1018 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1024 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1140 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1142 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1163 struct size_class *class; in zs_lookup_class_index() local
1165 class = pool->size_class[get_size_class_index(size)]; in zs_lookup_class_index()
1167 return class->index; in zs_lookup_class_index()
1200 struct size_class *class; in zs_map_object() local
1221 * zs_unmap_object API so delegate the locking from class to zspage in zs_map_object()
1227 class = zspage_class(pool, zspage); in zs_map_object()
1228 off = offset_in_page(class->size * obj_idx); in zs_map_object()
1233 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1245 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1261 struct size_class *class; in zs_unmap_object() local
1267 class = zspage_class(pool, zspage); in zs_unmap_object()
1268 off = offset_in_page(class->size * obj_idx); in zs_unmap_object()
1271 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1280 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
1293 * The function returns the size of the first huge class - any object of equal
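
This fragment is from the kernel-doc describing the huge-class boundary (zs_huge_class_size()). A sketch of the caller-side use; zram applies the value this way, but the names below are illustrative, not zram's:

static size_t huge_class_size;	/* cached once after zs_create_pool() */

static unsigned int clamp_comp_len(unsigned int comp_len)
{
	/* an object of huge-class size occupies a full page inside
	 * zsmalloc anyway, so storing the raw page costs nothing extra */
	return comp_len >= huge_class_size ? PAGE_SIZE : comp_len;
}
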
1313 struct size_class *class; in obj_malloc() local
1319 class = pool->size_class[zspage->class]; in obj_malloc()
1323 offset = obj * class->size; in obj_malloc()
1363 struct size_class *class; in zs_malloc() local
1379 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1383 zspage = find_get_zspage(class); in zs_malloc()
1387 fix_fullness_group(class, zspage); in zs_malloc()
1389 class_stat_inc(class, ZS_OBJS_INUSE, 1); in zs_malloc()
1396 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1404 newfg = get_fullness_group(class, zspage); in zs_malloc()
1405 insert_zspage(class, zspage, newfg); in zs_malloc()
1406 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1408 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); in zs_malloc()
1409 class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1410 class_stat_inc(class, ZS_OBJS_INUSE, 1); in zs_malloc()
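
Pulling the zs_malloc() fragments above into one place, this is the allocation flow they imply; handle management, locking, and error paths are omitted, so it is a sketch rather than the kernel's code:

	class = pool->size_class[get_size_class_index(size)];
	zspage = find_get_zspage(class);
	if (zspage) {					/* fast path */
		obj = obj_malloc(pool, zspage, handle);
		fix_fullness_group(class, zspage);
		class_stat_inc(class, ZS_OBJS_INUSE, 1);
		return obj;
	}

	zspage = alloc_zspage(pool, class, gfp);	/* slow path */
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class->index, newfg);
	/* a fresh zspage charges all of its pages and object slots */
	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
	class_stat_inc(class, ZS_OBJS_INUSE, 1);
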
1453 struct size_class *class; in zs_free() local
1467 class = zspage_class(pool, zspage); in zs_free()
1469 class_stat_dec(class, ZS_OBJS_INUSE, 1); in zs_free()
1470 obj_free(class->size, obj); in zs_free()
1472 fullness = fix_fullness_group(class, zspage); in zs_free()
1474 free_zspage(pool, class, zspage); in zs_free()
1481 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1491 s_size = d_size = class->size; in zs_object_copy()
1496 s_off = offset_in_page(class->size * s_objidx); in zs_object_copy()
1497 d_off = offset_in_page(class->size * d_objidx); in zs_object_copy()
1499 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1502 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1513 if (written == class->size) in zs_object_copy()
1534 s_size = class->size - written; in zs_object_copy()
1542 d_size = class->size - written; in zs_object_copy()
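
zs_object_copy() has to cope with objects that straddle a page boundary at the source, the destination, or both. A self-contained sketch of the chunked copy it performs; the pointer-array stand-in for kmapped pages and all names here are illustrative:

static void copy_spanning_obj(char **s_pages, unsigned int s_off,
			      char **d_pages, unsigned int d_off,
			      unsigned int size)
{
	unsigned int written = 0;

	while (written < size) {
		/* copy only up to the nearer page edge on either side */
		unsigned int chunk = min3(size - written,
					  (unsigned int)PAGE_SIZE - s_off,
					  (unsigned int)PAGE_SIZE - d_off);

		memcpy(*d_pages + d_off, *s_pages + s_off, chunk);
		written += chunk;
		s_off += chunk;
		d_off += chunk;
		if (s_off == PAGE_SIZE) {	/* source crossed a page */
			s_pages++;
			s_off = 0;
		}
		if (d_off == PAGE_SIZE) {	/* destination crossed */
			d_pages++;
			d_off = 0;
		}
	}
}
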
1555 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1564 offset += class->size * index; in find_alloced_obj()
1570 offset += class->size; in find_alloced_obj()
1588 struct size_class *class = pool->size_class[src_zspage->class]; in migrate_zspage() local
1591 handle = find_alloced_obj(class, s_page, &obj_idx); in migrate_zspage()
1602 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1605 obj_free(class->size, used_obj); in migrate_zspage()
1608 if (zspage_full(class, dst_zspage)) in migrate_zspage()
1617 static struct zspage *isolate_src_zspage(struct size_class *class) in isolate_src_zspage() argument
1623 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_src_zspage()
1626 remove_zspage(class, zspage, fg); in isolate_src_zspage()
1634 static struct zspage *isolate_dst_zspage(struct size_class *class) in isolate_dst_zspage() argument
1640 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_dst_zspage()
1643 remove_zspage(class, zspage, fg); in isolate_dst_zspage()
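
The two isolators feed compaction from opposite ends of the fullness spectrum: sources are sparsely used zspages to be drained, destinations are well-filled ones with room left. Which ratio each scan starts from is not visible in these single-line matches.
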
1652 * putback_zspage - add @zspage into right class's fullness list
1653 * @class: destination class
1658 static int putback_zspage(struct size_class *class, struct zspage *zspage) in putback_zspage() argument
1662 fullness = get_fullness_group(class, zspage); in putback_zspage()
1663 insert_zspage(class, zspage, fullness); in putback_zspage()
1664 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
1758 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1774 create_page_chain(class, zspage, pages); in replace_sub_page()
1805 struct size_class *class; in zs_page_migrate() local
1833 class = zspage_class(pool, zspage); in zs_page_migrate()
1849 addr += class->size) { in zs_page_migrate()
1861 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
1909 struct size_class *class; in async_free_zspage() local
1918 class = pool->size_class[i]; in async_free_zspage()
1919 if (class->index != i) in async_free_zspage()
1923 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0], in async_free_zspage()
1934 class = pool->size_class[class_idx]; in async_free_zspage()
1936 __free_zspage(pool, class, zspage); in async_free_zspage()
1975 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
1978 unsigned long obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED); in zs_can_compact()
1979 unsigned long obj_used = zs_stat_get(class, ZS_OBJS_INUSE); in zs_can_compact()
1985 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
1987 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
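
Read together, these fragments give freeable = ((obj_allocated - obj_used) / objs_per_zspage) * pages_per_zspage, i.e. only whole zspages' worth of wasted slots count. For example, a class with objs_per_zspage = 8 and pages_per_zspage = 2 that has 100 objects allocated but only 60 in use wastes 40 slots: 40 / 8 = 5 reclaimable zspages, so zs_can_compact() reports 10 pages.
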
1991 struct size_class *class) in __zs_compact() argument
2002 while (zs_can_compact(class)) { in __zs_compact()
2006 dst_zspage = isolate_dst_zspage(class); in __zs_compact()
2012 src_zspage = isolate_src_zspage(class); in __zs_compact()
2019 fg = putback_zspage(class, src_zspage); in __zs_compact()
2023 free_zspage(pool, class, src_zspage); in __zs_compact()
2024 pages_freed += class->pages_per_zspage; in __zs_compact()
2028 if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100 in __zs_compact()
2030 putback_zspage(class, dst_zspage); in __zs_compact()
2041 putback_zspage(class, src_zspage); in __zs_compact()
2046 putback_zspage(class, dst_zspage); in __zs_compact()
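
Assembled into one routine, the __zs_compact() fragments imply a loop of this shape; locking, migrate failure handling, and the exact control flow are assumptions:

static unsigned long compact_class(struct zs_pool *pool, struct size_class *class)
{
	struct zspage *src_zspage, *dst_zspage = NULL;
	unsigned long pages_freed = 0;

	while (zs_can_compact(class)) {
		if (!dst_zspage) {
			dst_zspage = isolate_dst_zspage(class);
			if (!dst_zspage)
				break;
		}

		src_zspage = isolate_src_zspage(class);
		if (!src_zspage)
			break;

		migrate_zspage(pool, src_zspage, dst_zspage);

		if (putback_zspage(class, src_zspage) == ZS_INUSE_RATIO_0) {
			/* source fully drained: free its pages */
			free_zspage(pool, class, src_zspage);
			pages_freed += class->pages_per_zspage;
		}

		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100) {
			/* destination filled up; park it, pick another */
			putback_zspage(class, dst_zspage);
			dst_zspage = NULL;
		}
	}

	if (dst_zspage)
		putback_zspage(class, dst_zspage);
	return pages_freed;
}
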
2057 struct size_class *class; in zs_compact() local
2070 class = pool->size_class[i]; in zs_compact()
2071 if (class->index != i) in zs_compact()
2073 pages_freed += __zs_compact(pool, class); in zs_compact()
2108 struct size_class *class; in zs_shrinker_count() local
2113 class = pool->size_class[i]; in zs_shrinker_count()
2114 if (class->index != i) in zs_shrinker_count()
2117 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2204 struct size_class *class; in zs_create_pool() local
2216 * class. Any object bigger than or equal to that will in zs_create_pool()
2217 * end up in the huge class. in zs_create_pool()
2226 * size class search - so object may be smaller than in zs_create_pool()
2227 * huge class size, yet it still can end up in the huge in zs_create_pool()
2228 * class because it grows by ZS_HANDLE_SIZE extra bytes in zs_create_pool()
2229 * right before class lookup. in zs_create_pool()
2250 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2251 if (!class) in zs_create_pool()
2254 class->size = size; in zs_create_pool()
2255 class->index = i; in zs_create_pool()
2256 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2257 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2258 pool->size_class[i] = class; in zs_create_pool()
2262 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2266 prev_class = class; in zs_create_pool()
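
A sketch of the class-table construction the zs_create_pool() fragments imply. The sizing macros and calc_pages_per_zspage() are assumed names for the scheme, not copied code. Note the merge step: consecutive classes with identical geometry alias one struct size_class, which is why iterators throughout this listing skip entries where class->index != i.

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		int pages_per_zspage = calc_pages_per_zspage(size);
		int objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		if (prev_class &&
		    prev_class->pages_per_zspage == pages_per_zspage &&
		    prev_class->objs_per_zspage == objs_per_zspage) {
			pool->size_class[i] = prev_class;	/* merged */
			continue;
		}
		/* otherwise allocate a fresh class and init its fullness
		 * lists, as the fragments above show */
	}
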
2298 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2300 if (!class) in zs_destroy_pool()
2303 if (class->index != i) in zs_destroy_pool()
2307 if (list_empty(&class->fullness_list[fg])) in zs_destroy_pool()
2310 pr_err("Class-%d fullness group %d is not empty\n", in zs_destroy_pool()
2311 class->size, fg); in zs_destroy_pool()
2313 kfree(class); in zs_destroy_pool()