Lines Matching full:class
22 * class->lock
124 * determined). NOTE: all those class sizes must be set as multiple of
174 * Size of objects stored in this class. Must be multiple
273 unsigned int class:CLASS_BITS + 1; member
439 /* class->lock(which owns the handle) synchronizes races */
522 /* Protected by class->lock */
572 return pool->size_class[zspage->class]; in zspage_class()
577 * class maintains a list of zspages where each zspage is divided
580 * size class which has chunk size big enough to hold the given size.
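
These matches appear to come from the kernel's zsmalloc allocator (mm/zsmalloc.c). The size_class comment above (lines 577-580) describes how an allocation is routed to the smallest class whose chunk size can hold it. Below is a minimal userspace sketch of that lookup; MIN_ALLOC, CLASS_DELTA and NR_CLASSES are made-up constants standing in for the kernel's class-size parameters, not its actual values.

#include <stdio.h>

#define MIN_ALLOC       32      /* smallest class size, illustrative */
#define CLASS_DELTA     16      /* spacing between class sizes, illustrative */
#define NR_CLASSES      255

static int size_to_class_index(size_t size)
{
        int idx = 0;

        if (size > MIN_ALLOC)
                /* round up: smallest class whose chunk size holds the object */
                idx = (size - MIN_ALLOC + CLASS_DELTA - 1) / CLASS_DELTA;

        return idx < NR_CLASSES - 1 ? idx : NR_CLASSES - 1;
}

int main(void)
{
        printf("%d\n", size_to_class_index(100));       /* prints 5 */
        return 0;
}
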
593 static inline void class_stat_add(struct size_class *class, int type, in class_stat_add() argument
596 class->stats.objs[type] += cnt; in class_stat_add()
599 static inline void class_stat_sub(struct size_class *class, int type, in class_stat_sub() argument
602 class->stats.objs[type] -= cnt; in class_stat_sub()
605 static inline unsigned long class_stat_read(struct size_class *class, int type) in class_stat_read() argument
607 return class->stats.objs[type]; in class_stat_read()
627 static unsigned long zs_can_compact(struct size_class *class);
633 struct size_class *class; in zs_stats_size_show() local
641 "class", "size", "10%", "20%", "30%", "40%", in zs_stats_size_show()
648 class = pool->size_class[i]; in zs_stats_size_show()
650 if (class->index != i) in zs_stats_size_show()
653 spin_lock(&class->lock); in zs_stats_size_show()
655 seq_printf(s, " %5u %5u ", i, class->size); in zs_stats_size_show()
657 inuse_totals[fg] += class_stat_read(class, fg); in zs_stats_size_show()
658 seq_printf(s, "%9lu ", class_stat_read(class, fg)); in zs_stats_size_show()
661 obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); in zs_stats_size_show()
662 obj_used = class_stat_read(class, ZS_OBJS_INUSE); in zs_stats_size_show()
663 freeable = zs_can_compact(class); in zs_stats_size_show()
664 spin_unlock(&class->lock); in zs_stats_size_show()
666 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
668 class->pages_per_zspage; in zs_stats_size_show()
672 class->pages_per_zspage, freeable); in zs_stats_size_show()
732 * For each size class, zspages are divided into different groups
736 static int get_fullness_group(struct size_class *class, struct zspage *zspage) in get_fullness_group() argument
741 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
758 * Each size class maintains various freelists and zspages are assigned
761 * identified by <class, fullness_group>.
763 static void insert_zspage(struct size_class *class, in insert_zspage() argument
767 class_stat_add(class, fullness, 1); in insert_zspage()
768 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
774 * by <class, fullness_group>.
776 static void remove_zspage(struct size_class *class, struct zspage *zspage) in remove_zspage() argument
780 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
783 class_stat_sub(class, fullness, 1); in remove_zspage()
787 * Each size class maintains zspages in different fullness groups depending
795 static int fix_fullness_group(struct size_class *class, struct zspage *zspage) in fix_fullness_group() argument
799 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
803 remove_zspage(class, zspage); in fix_fullness_group()
804 insert_zspage(class, zspage, newfg); in fix_fullness_group()
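
get_fullness_group() and fix_fullness_group() above bucket each zspage by how full it is, and the "10%" / "20%" / ... column headers in zs_stats_size_show() suggest ten-percent buckets. Here is a userspace model of that bucketing; the bucket numbering is illustrative and the kernel's ZS_INUSE_RATIO_* enum values may differ.

#include <stdio.h>

static int fullness_bucket(int inuse, int objs_per_zspage)
{
        int ratio = 100 * inuse / objs_per_zspage;

        if (inuse == 0)
                return 0;               /* empty zspage */
        return ratio / 10 + 1;          /* 1..11, 11 meaning completely full */
}

int main(void)
{
        printf("%d\n", fullness_bucket(3, 7));          /* ~42% full -> bucket 5 */
        printf("%d\n", fullness_bucket(7, 7));          /* 100% full -> bucket 11 */
        return 0;
}
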
917 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
922 assert_spin_locked(&class->lock); in __free_zspage()
940 class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in __free_zspage()
941 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); in __free_zspage()
944 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
960 remove_zspage(class, zspage); in free_zspage()
961 __free_zspage(pool, class, zspage); in free_zspage()
965 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
981 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
983 link += class->size / sizeof(*link); in init_zspage()
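
init_zspage() above walks a freshly allocated zspage in class->size strides, threading a free list through the unused slots (the `link += class->size / sizeof(*link)` line). The following is a simplified userspace model of that construction over a single page; the kernel's tag bits and cross-page linking are deliberately ignored.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE       4096

static void init_freelist(void *page, unsigned int obj_size)
{
        unsigned int off, idx = 0;
        uint32_t *link = NULL;

        for (off = 0; off + obj_size <= PAGE_SIZE; off += obj_size) {
                link = (uint32_t *)((char *)page + off);
                *link = ++idx;          /* index of the next free slot */
        }
        if (link)
                *link = UINT32_MAX;     /* last slot terminates the list */
}

int main(void)
{
        static uint32_t page[PAGE_SIZE / sizeof(uint32_t)];

        init_freelist(page, 128);
        printf("first slot links to %u\n", page[0]);    /* prints 1 */
        return 0;
}
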
1009 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
1015 int nr_zpdescs = class->pages_per_zspage; in create_page_chain()
1032 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
1033 class->pages_per_zspage == 1)) in create_page_chain()
1043 * Allocate a zspage for the given size class
1046 struct size_class *class, in alloc_zspage() argument
1058 zspage->class = class->index; in alloc_zspage()
1061 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1080 create_page_chain(class, zspage, zpdescs); in alloc_zspage()
1081 init_zspage(class, zspage); in alloc_zspage()
1086 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1092 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1111 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1113 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1134 struct size_class *class; in zs_lookup_class_index() local
1136 class = pool->size_class[get_size_class_index(size)]; in zs_lookup_class_index()
1138 return class->index; in zs_lookup_class_index()
1155 struct size_class *class; in zs_obj_read_begin() local
1168 class = zspage_class(pool, zspage); in zs_obj_read_begin()
1169 off = offset_in_page(class->size * obj_idx); in zs_obj_read_begin()
1171 if (off + class->size <= PAGE_SIZE) { in zs_obj_read_begin()
1180 sizes[1] = class->size - sizes[0]; in zs_obj_read_begin()
1205 struct size_class *class; in zs_obj_read_end() local
1210 class = zspage_class(pool, zspage); in zs_obj_read_end()
1211 off = offset_in_page(class->size * obj_idx); in zs_obj_read_end()
1213 if (off + class->size <= PAGE_SIZE) { in zs_obj_read_end()
1231 struct size_class *class; in zs_obj_write() local
1243 class = zspage_class(pool, zspage); in zs_obj_write()
1244 off = offset_in_page(class->size * obj_idx); in zs_obj_write()
1278 * The function returns the size of the first huge class - any object of equal
1298 struct size_class *class; in obj_malloc() local
1304 class = pool->size_class[zspage->class]; in obj_malloc()
1307 offset = obj * class->size; in obj_malloc()
1347 struct size_class *class; in zs_malloc() local
1363 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1365 /* class->lock effectively protects the zpage migration */ in zs_malloc()
1366 spin_lock(&class->lock); in zs_malloc()
1367 zspage = find_get_zspage(class); in zs_malloc()
1371 fix_fullness_group(class, zspage); in zs_malloc()
1372 class_stat_add(class, ZS_OBJS_INUSE, 1); in zs_malloc()
1377 spin_unlock(&class->lock); in zs_malloc()
1379 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1385 spin_lock(&class->lock); in zs_malloc()
1387 newfg = get_fullness_group(class, zspage); in zs_malloc()
1388 insert_zspage(class, zspage, newfg); in zs_malloc()
1389 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); in zs_malloc()
1390 class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1391 class_stat_add(class, ZS_OBJS_INUSE, 1); in zs_malloc()
1396 spin_unlock(&class->lock); in zs_malloc()
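
The zs_malloc() lines above show the allocation path: take class->lock, try find_get_zspage() for a zspage that still has free slots, otherwise alloc_zspage(), then account the object and re-bucket the zspage by its new fullness. A condensed userspace model of that control flow follows; locking, handles, per-class statistics and freeing of full zspages are omitted.

#include <stdio.h>
#include <stdlib.h>

struct zspage_model {
        int inuse;
        int objs_per_zspage;
};

static struct zspage_model *find_or_alloc_zspage(struct zspage_model **cached,
                                                 int objs_per_zspage)
{
        /* reuse a zspage that still has free object slots */
        if (*cached && (*cached)->inuse < (*cached)->objs_per_zspage)
                return *cached;

        /* otherwise create a fresh zspage (alloc_zspage() in the kernel) */
        *cached = calloc(1, sizeof(**cached));
        if (*cached)
                (*cached)->objs_per_zspage = objs_per_zspage;
        return *cached;
}

int main(void)
{
        struct zspage_model *zspage = NULL;
        int i;

        for (i = 0; i < 10; i++) {
                struct zspage_model *z = find_or_alloc_zspage(&zspage, 7);

                if (!z)
                        return 1;
                z->inuse++;                     /* obj_malloc() */
                if (z->inuse == z->objs_per_zspage)
                        zspage = NULL;          /* full: stop handing it out */
        }
        printf("objects in last zspage: %d\n", zspage ? zspage->inuse : 0); /* 3 */
        return 0;
}
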
1435 struct size_class *class; in zs_free() local
1449 class = zspage_class(pool, zspage); in zs_free()
1450 spin_lock(&class->lock); in zs_free()
1453 class_stat_sub(class, ZS_OBJS_INUSE, 1); in zs_free()
1454 obj_free(class->size, obj); in zs_free()
1456 fullness = fix_fullness_group(class, zspage); in zs_free()
1458 free_zspage(pool, class, zspage); in zs_free()
1460 spin_unlock(&class->lock); in zs_free()
1465 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1475 s_size = d_size = class->size; in zs_object_copy()
1480 s_off = offset_in_page(class->size * s_objidx); in zs_object_copy()
1481 d_off = offset_in_page(class->size * d_objidx); in zs_object_copy()
1483 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1486 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1497 if (written == class->size) in zs_object_copy()
1518 s_size = class->size - written; in zs_object_copy()
1526 d_size = class->size - written; in zs_object_copy()
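
zs_object_copy() above copies one object into another slot, and because an object of class->size bytes can straddle a page boundary in either zspage, the copy proceeds in chunks bounded by whichever limit is reached first: end of the object, end of the source page, or end of the destination page. Below is a userspace sketch of that chunking over flat buffers, which glosses over the kernel's per-page mappings.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE       4096

static size_t min3(size_t a, size_t b, size_t c)
{
        size_t m = a < b ? a : b;

        return m < c ? m : c;
}

static void copy_obj(char *dst, size_t d_off, const char *src, size_t s_off,
                     size_t obj_size)
{
        size_t written = 0;

        while (written < obj_size) {
                /* stop at the end of the object or of either page */
                size_t chunk = min3(obj_size - written,
                                    PAGE_SIZE - s_off % PAGE_SIZE,
                                    PAGE_SIZE - d_off % PAGE_SIZE);

                memcpy(dst + d_off, src + s_off, chunk);
                written += chunk;
                s_off += chunk;
                d_off += chunk;
        }
}

int main(void)
{
        static char src[2 * PAGE_SIZE], dst[2 * PAGE_SIZE];

        memset(src + 4000, 0xAB, 200);  /* object straddles a page boundary */
        copy_obj(dst, 100, src, 4000, 200);
        printf("%#x\n", (unsigned char)dst[250]);       /* prints 0xab */
        return 0;
}
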
1539 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1548 offset += class->size * index; in find_alloced_obj()
1554 offset += class->size; in find_alloced_obj()
1572 struct size_class *class = pool->size_class[src_zspage->class]; in migrate_zspage() local
1575 handle = find_alloced_obj(class, s_zpdesc, &obj_idx); in migrate_zspage()
1586 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1588 obj_free(class->size, used_obj); in migrate_zspage()
1591 if (zspage_full(class, dst_zspage)) in migrate_zspage()
1600 static struct zspage *isolate_src_zspage(struct size_class *class) in isolate_src_zspage() argument
1606 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_src_zspage()
1609 remove_zspage(class, zspage); in isolate_src_zspage()
1617 static struct zspage *isolate_dst_zspage(struct size_class *class) in isolate_dst_zspage() argument
1623 zspage = list_first_entry_or_null(&class->fullness_list[fg], in isolate_dst_zspage()
1626 remove_zspage(class, zspage); in isolate_dst_zspage()
1635 * putback_zspage - add @zspage into right class's fullness list
1636 * @class: destination class
1641 static int putback_zspage(struct size_class *class, struct zspage *zspage) in putback_zspage() argument
1645 fullness = get_fullness_group(class, zspage); in putback_zspage()
1646 insert_zspage(class, zspage, fullness); in putback_zspage()
1699 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1716 create_page_chain(class, zspage, zpdescs); in replace_sub_page()
1739 struct size_class *class; in zs_page_migrate() local
1761 class = zspage_class(pool, zspage); in zs_page_migrate()
1764 * the class lock protects zpage alloc/free in the zspage. in zs_page_migrate()
1766 spin_lock(&class->lock); in zs_page_migrate()
1769 spin_unlock(&class->lock); in zs_page_migrate()
1788 addr += class->size) { in zs_page_migrate()
1799 replace_sub_page(class, zspage, newzpdesc, zpdesc); in zs_page_migrate()
1805 spin_unlock(&class->lock); in zs_page_migrate()
1838 struct size_class *class; in async_free_zspage() local
1845 class = pool->size_class[i]; in async_free_zspage()
1846 if (class->index != i) in async_free_zspage()
1849 spin_lock(&class->lock); in async_free_zspage()
1850 list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0], in async_free_zspage()
1852 spin_unlock(&class->lock); in async_free_zspage()
1859 class = zspage_class(pool, zspage); in async_free_zspage()
1860 spin_lock(&class->lock); in async_free_zspage()
1861 class_stat_sub(class, ZS_INUSE_RATIO_0, 1); in async_free_zspage()
1862 __free_zspage(pool, class, zspage); in async_free_zspage()
1863 spin_unlock(&class->lock); in async_free_zspage()
1901 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
1904 unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED); in zs_can_compact()
1905 unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE); in zs_can_compact()
1911 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
1913 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
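
zs_can_compact() above estimates how many pages compaction could release: the surplus of allocated-but-unused object slots, expressed in whole zspages, times the pages backing each zspage. A standalone model of the same arithmetic, with a worked example:

#include <stdio.h>

static unsigned long can_compact(unsigned long obj_allocated,
                                 unsigned long obj_used,
                                 unsigned int objs_per_zspage,
                                 unsigned int pages_per_zspage)
{
        unsigned long obj_wasted;

        if (obj_allocated <= obj_used)
                return 0;

        /* unused slots, rounded down to whole zspages' worth of objects */
        obj_wasted = (obj_allocated - obj_used) / objs_per_zspage;

        return obj_wasted * pages_per_zspage;
}

int main(void)
{
        /* 35 slots allocated, 12 in use, 7 objects and 4 pages per zspage */
        printf("%lu pages freeable\n", can_compact(35, 12, 7, 4));      /* 12 */
        return 0;
}
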
1917 struct size_class *class) in __zs_compact() argument
1928 spin_lock(&class->lock); in __zs_compact()
1929 while (zs_can_compact(class)) { in __zs_compact()
1933 dst_zspage = isolate_dst_zspage(class); in __zs_compact()
1938 src_zspage = isolate_src_zspage(class); in __zs_compact()
1948 fg = putback_zspage(class, src_zspage); in __zs_compact()
1950 free_zspage(pool, class, src_zspage); in __zs_compact()
1951 pages_freed += class->pages_per_zspage; in __zs_compact()
1955 if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100 in __zs_compact()
1957 putback_zspage(class, dst_zspage); in __zs_compact()
1960 spin_unlock(&class->lock); in __zs_compact()
1964 spin_lock(&class->lock); in __zs_compact()
1969 putback_zspage(class, src_zspage); in __zs_compact()
1972 putback_zspage(class, dst_zspage); in __zs_compact()
1974 spin_unlock(&class->lock); in __zs_compact()
1983 struct size_class *class; in zs_compact() local
1996 class = pool->size_class[i]; in zs_compact()
1997 if (class->index != i) in zs_compact()
1999 pages_freed += __zs_compact(pool, class); in zs_compact()
2034 struct size_class *class; in zs_shrinker_count() local
2039 class = pool->size_class[i]; in zs_shrinker_count()
2040 if (class->index != i) in zs_shrinker_count()
2043 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2130 struct size_class *class; in zs_create_pool() local
2142 * class. Any object bigger than or equal to that will in zs_create_pool()
2143 * endup in the huge class. in zs_create_pool()
2152 * size class search - so object may be smaller than in zs_create_pool()
2153 * huge class size, yet it still can end up in the huge in zs_create_pool()
2154 * class because it grows by ZS_HANDLE_SIZE extra bytes in zs_create_pool()
2155 * right before class lookup. in zs_create_pool()
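
The zs_create_pool() comment above (lines 2142-2155) notes that the requested size is grown by ZS_HANDLE_SIZE right before the class lookup, so a request just below the huge-class threshold can still land in the huge class. A tiny numeric illustration with made-up HANDLE_SIZE and HUGE_CLASS_SIZE values, not the kernel's constants:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define HANDLE_SIZE             8       /* illustrative, not ZS_HANDLE_SIZE */
#define HUGE_CLASS_SIZE         3264    /* illustrative threshold */

static bool lands_in_huge_class(size_t req)
{
        /* the handle is added to the request before the class lookup */
        return req + HANDLE_SIZE >= HUGE_CLASS_SIZE;
}

int main(void)
{
        printf("%d\n", lands_in_huge_class(3260));      /* 1: pushed over by the handle */
        printf("%d\n", lands_in_huge_class(3200));      /* 0 */
        return 0;
}
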
2176 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2177 if (!class) in zs_create_pool()
2180 class->size = size; in zs_create_pool()
2181 class->index = i; in zs_create_pool()
2182 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2183 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2184 spin_lock_init(&class->lock); in zs_create_pool()
2185 pool->size_class[i] = class; in zs_create_pool()
2189 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2193 prev_class = class; in zs_create_pool()
2225 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2227 if (!class) in zs_destroy_pool()
2230 if (class->index != i) in zs_destroy_pool()
2234 if (list_empty(&class->fullness_list[fg])) in zs_destroy_pool()
2237 pr_err("Class-%d fullness group %d is not empty\n", in zs_destroy_pool()
2238 class->size, fg); in zs_destroy_pool()
2240 kfree(class); in zs_destroy_pool()