Lines matching "pool" in mm/z3fold.c (each entry shows the source line number and, where known, the enclosing function).
51 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
56 * be 63, or 62, respectively, freelists per pool.
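For concreteness, here is the arithmetic those two comment lines describe, as a standalone computation. It assumes PAGE_SIZE is 4096 and a z3fold header that fits in a single chunk (the CONFIG_DEBUG_SPINLOCK=n case); the macro names mirror, but are not copied from, the ones in the file:

        #include <stdio.h>

        #define PAGE_SIZE       4096UL
        #define NCHUNKS_ORDER   6                       /* as in the comment above */
        #define CHUNK_SHIFT     (12 - NCHUNKS_ORDER)    /* PAGE_SHIFT - NCHUNKS_ORDER */
        #define CHUNK_SIZE      (1UL << CHUNK_SHIFT)    /* 4096 / 64 = 64 bytes */
        #define ZHDR_CHUNKS     1UL                     /* header occupies one chunk */
        #define NCHUNKS         ((PAGE_SIZE >> CHUNK_SHIFT) - ZHDR_CHUNKS)

        int main(void)
        {
                /* 64 chunks per page minus the header chunk: 63 freelists;
                 * a larger (debug) header eats one more chunk, giving 62 */
                printf("chunk size %lu, freelists per pool %lu\n",
                       CHUNK_SIZE, NCHUNKS);
                return 0;
        }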
76 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
93 unsigned long pool; /* back link + flags */ member
102 * pool
107 * @pool: pointer to the containing pool
121 struct z3fold_pool *pool; member
133 * struct z3fold_pool - stores metadata for each z3fold pool
134 * @name: pool name
135 * @lock: protects pool unbuddied/lru lists
136 * @stale_lock: protects pool stale page list
143 * @pages_nr: number of z3fold pages in the pool.
146 * pool creation time.
152 * This structure is allocated at pool creation time and maintains metadata
153 * pertaining to a particular z3fold pool.
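Pulling the kernel-doc fields above together, the pool looks roughly like this. This is a trimmed reconstruction from memory of the v5.x struct, not a verbatim copy; field order and anything beyond the documented members may differ in a given tree:

        struct z3fold_pool {
                const char *name;
                spinlock_t lock;                /* protects unbuddied/lru lists */
                spinlock_t stale_lock;          /* protects the stale page list */
                struct list_head *unbuddied;    /* per-CPU arrays of NCHUNKS freelists */
                struct list_head lru;           /* pages in LRU order, for reclaim */
                struct list_head stale;         /* pages queued for deferred release */
                atomic64_t pages_nr;            /* number of z3fold pages in the pool */
                struct kmem_cache *c_handle;    /* slab cache for z3fold_buddy_slots */
                const struct z3fold_ops *ops;   /* eviction callback, set at creation */
                struct zpool *zpool;
                const struct zpool_ops *zpool_ops;
                struct workqueue_struct *compact_wq;
                struct workqueue_struct *release_wq;
                struct work_struct work;        /* runs free_pages_work() */
                struct inode *inode;            /* anon inode for page migration */
        };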
210 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, in alloc_slots() argument
215 slots = kmem_cache_zalloc(pool->c_handle, in alloc_slots()
221 slots->pool = (unsigned long)pool; in alloc_slots()
230 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); in slots_to_pool()
331 if (!test_bit(HANDLES_ORPHANED, &slots->pool)) { in free_handle()
344 struct z3fold_pool *pool = slots_to_pool(slots); in free_handle() local
346 kmem_cache_free(pool->c_handle, slots); in free_handle()
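slots_to_pool() and free_handle() work because slots->pool is a tagged pointer: the slab allocation is aligned, so its low bits are free to carry flags such as HANDLES_ORPHANED, and masking with ~HANDLE_FLAG_MASK recovers the pool pointer (line 230 above). A standalone model of that encoding; the mask and flag values here are illustrative, not copied from the file:

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define HANDLE_FLAG_MASK 0x03UL         /* low bits freed up by alignment */
        #define HANDLES_ORPHANED 0x01UL         /* illustrative flag value */

        int main(void)
        {
                /* stands in for a z3fold_pool; slab objects are at least
                 * pointer-aligned, so bits 0-1 of the address are zero */
                void *pool = aligned_alloc(64, 64);
                uintptr_t backlink = (uintptr_t)pool;   /* slots->pool */

                backlink |= HANDLES_ORPHANED;           /* set_bit(HANDLES_ORPHANED, ...) */
                assert(backlink & HANDLES_ORPHANED);    /* test_bit(...) */
                /* slots_to_pool(): strip the flag bits to get the pool back */
                assert((void *)(backlink & ~HANDLE_FLAG_MASK) == pool);

                printf("pool %p, flags %#lx\n",
                       (void *)(backlink & ~HANDLE_FLAG_MASK),
                       (unsigned long)(backlink & HANDLE_FLAG_MASK));
                free(pool);
                return 0;
        }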
379 static int z3fold_register_migration(struct z3fold_pool *pool) in z3fold_register_migration() argument
381 pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb); in z3fold_register_migration()
382 if (IS_ERR(pool->inode)) { in z3fold_register_migration()
383 pool->inode = NULL; in z3fold_register_migration()
387 pool->inode->i_mapping->private_data = pool; in z3fold_register_migration()
388 pool->inode->i_mapping->a_ops = &z3fold_aops; in z3fold_register_migration()
392 static void z3fold_unregister_migration(struct z3fold_pool *pool) in z3fold_unregister_migration() argument
394 if (pool->inode) in z3fold_unregister_migration()
395 iput(pool->inode); in z3fold_unregister_migration()
400 struct z3fold_pool *pool, gfp_t gfp) in init_z3fold_page() argument
414 slots = alloc_slots(pool, gfp); in init_z3fold_page()
429 zhdr->pool = pool; in init_z3fold_page()
455 * Pool lock should be held as this function accesses first_num
521 return zhdr->pool; in zhdr_to_pool()
527 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in __release_z3fold_page() local
534 spin_lock(&pool->lock); in __release_z3fold_page()
537 spin_unlock(&pool->lock); in __release_z3fold_page()
548 set_bit(HANDLES_ORPHANED, &zhdr->slots->pool); in __release_z3fold_page()
552 kmem_cache_free(pool->c_handle, zhdr->slots); in __release_z3fold_page()
557 spin_lock(&pool->stale_lock); in __release_z3fold_page()
558 list_add(&zhdr->buddy, &pool->stale); in __release_z3fold_page()
559 queue_work(pool->release_wq, &pool->work); in __release_z3fold_page()
560 spin_unlock(&pool->stale_lock); in __release_z3fold_page()
583 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in release_z3fold_page_locked_list() local
585 spin_lock(&pool->lock); in release_z3fold_page_locked_list()
587 spin_unlock(&pool->lock); in release_z3fold_page_locked_list()
595 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); in free_pages_work() local
597 spin_lock(&pool->stale_lock); in free_pages_work()
598 while (!list_empty(&pool->stale)) { in free_pages_work()
599 struct z3fold_header *zhdr = list_first_entry(&pool->stale, in free_pages_work()
606 spin_unlock(&pool->stale_lock); in free_pages_work()
610 spin_lock(&pool->stale_lock); in free_pages_work()
612 spin_unlock(&pool->stale_lock); in free_pages_work()
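Together, __release_z3fold_page() and free_pages_work() implement deferred freeing: the release path parks the header on pool->stale under stale_lock and kicks release_wq (lines 557-560), and the work item later drains the list, dropping the lock around each actual free (the unlock/relock at 606/610). Below is a userspace model of that pattern with a pthread standing in for the workqueue; every name in it is the model's own:

        #include <pthread.h>
        #include <stdlib.h>

        struct stale_page { struct stale_page *next; };

        static struct stale_page *stale_list;   /* plays pool->stale */
        static pthread_mutex_t stale_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t stale_cv = PTHREAD_COND_INITIALIZER;
        static int shutting_down;

        static void *release_worker(void *arg)  /* plays free_pages_work() */
        {
                (void)arg;
                pthread_mutex_lock(&stale_lock);
                while (!shutting_down || stale_list) {
                        while (stale_list) {
                                struct stale_page *p = stale_list;

                                stale_list = p->next;
                                /* drop the lock around the actual free, as
                                 * free_pages_work() does at 606/610 */
                                pthread_mutex_unlock(&stale_lock);
                                free(p);
                                pthread_mutex_lock(&stale_lock);
                        }
                        if (!shutting_down)
                                pthread_cond_wait(&stale_cv, &stale_lock);
                }
                pthread_mutex_unlock(&stale_lock);
                return NULL;
        }

        static void release_page(struct stale_page *p) /* plays __release_z3fold_page() */
        {
                pthread_mutex_lock(&stale_lock);
                p->next = stale_list;           /* list_add(&zhdr->buddy, &pool->stale) */
                stale_list = p;
                pthread_cond_signal(&stale_cv); /* queue_work(pool->release_wq, ...) */
                pthread_mutex_unlock(&stale_lock);
        }

        int main(void)
        {
                pthread_t worker;

                pthread_create(&worker, NULL, release_worker, NULL);
                for (int i = 0; i < 8; i++)
                        release_page(malloc(sizeof(struct stale_page)));

                pthread_mutex_lock(&stale_lock);
                shutting_down = 1;
                pthread_cond_signal(&stale_cv);
                pthread_mutex_unlock(&stale_lock);
                pthread_join(worker, NULL);
                return 0;
        }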
640 static inline void add_to_unbuddied(struct z3fold_pool *pool, in add_to_unbuddied() argument
645 struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied); in add_to_unbuddied()
648 spin_lock(&pool->lock); in add_to_unbuddied()
650 spin_unlock(&pool->lock); in add_to_unbuddied()
652 put_cpu_ptr(pool->unbuddied); in add_to_unbuddied()
674 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in compact_single_buddy() local
710 new_zhdr = __z3fold_alloc(pool, sz, false); in compact_single_buddy()
757 add_to_unbuddied(pool, new_zhdr); in compact_single_buddy()
768 atomic64_dec(&pool->pages_nr); in compact_single_buddy()
770 add_to_unbuddied(pool, new_zhdr); in compact_single_buddy()
829 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in do_compact_page() local
841 spin_lock(&pool->lock); in do_compact_page()
843 spin_unlock(&pool->lock); in do_compact_page()
846 atomic64_dec(&pool->pages_nr); in do_compact_page()
860 atomic64_dec(&pool->pages_nr); in do_compact_page()
867 add_to_unbuddied(pool, zhdr); in do_compact_page()
880 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool, in __z3fold_alloc() argument
890 unbuddied = get_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
901 spin_lock(&pool->lock); in __z3fold_alloc()
906 spin_unlock(&pool->lock); in __z3fold_alloc()
908 put_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
915 spin_unlock(&pool->lock); in __z3fold_alloc()
922 put_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
930 * list while pool lock was held, and then we've taken in __z3fold_alloc()
937 put_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
946 unbuddied = per_cpu_ptr(pool->unbuddied, cpu); in __z3fold_alloc()
947 spin_lock(&pool->lock); in __z3fold_alloc()
954 spin_unlock(&pool->lock); in __z3fold_alloc()
960 spin_unlock(&pool->lock); in __z3fold_alloc()
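The unbuddied lists that add_to_unbuddied() and __z3fold_alloc() manage are per-CPU arrays indexed by a page's free-chunk count, and allocation is first fit: scan from the requested size upwards, take the first page found, then fall back to other CPUs' lists and finally to a fresh page. A single-threaded model of the indexing and scan, with per-CPU replication and locking omitted; names are the model's own:

        #include <stdio.h>

        #define NCHUNKS 63

        struct zpage {
                int free_chunks;
                struct zpage *next;
        };

        static struct zpage *unbuddied[NCHUNKS];        /* index = free chunks */

        static void model_add_to_unbuddied(struct zpage *p)
        {
                /* file the page under its exact free-chunk count */
                p->next = unbuddied[p->free_chunks];
                unbuddied[p->free_chunks] = p;
        }

        static struct zpage *model_z3fold_alloc(int chunks)
        {
                /* first fit: smallest freelist that can hold the request */
                for (int i = chunks; i < NCHUNKS; i++) {
                        if (unbuddied[i]) {
                                struct zpage *p = unbuddied[i];

                                unbuddied[i] = p->next;
                                return p;
                        }
                }
                return NULL;    /* caller falls back to a fresh page */
        }

        int main(void)
        {
                struct zpage a = { .free_chunks = 10 };
                struct zpage b = { .free_chunks = 40 };

                model_add_to_unbuddied(&a);
                model_add_to_unbuddied(&b);
                printf("request 20 chunks -> page with %d free\n",
                       model_z3fold_alloc(20)->free_chunks);    /* finds b */
                return 0;
        }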
984 * z3fold_create_pool() - create a new z3fold pool
985 * @name: pool name
986 * @gfp: gfp flags when allocating the z3fold pool structure
987 * @ops: user-defined operations for the z3fold pool
989 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
995 struct z3fold_pool *pool = NULL; in z3fold_create_pool() local
998 pool = kzalloc(sizeof(struct z3fold_pool), gfp); in z3fold_create_pool()
999 if (!pool) in z3fold_create_pool()
1001 pool->c_handle = kmem_cache_create("z3fold_handle", in z3fold_create_pool()
1004 if (!pool->c_handle) in z3fold_create_pool()
1006 spin_lock_init(&pool->lock); in z3fold_create_pool()
1007 spin_lock_init(&pool->stale_lock); in z3fold_create_pool()
1008 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); in z3fold_create_pool()
1009 if (!pool->unbuddied) in z3fold_create_pool()
1013 per_cpu_ptr(pool->unbuddied, cpu); in z3fold_create_pool()
1017 INIT_LIST_HEAD(&pool->lru); in z3fold_create_pool()
1018 INIT_LIST_HEAD(&pool->stale); in z3fold_create_pool()
1019 atomic64_set(&pool->pages_nr, 0); in z3fold_create_pool()
1020 pool->name = name; in z3fold_create_pool()
1021 pool->compact_wq = create_singlethread_workqueue(pool->name); in z3fold_create_pool()
1022 if (!pool->compact_wq) in z3fold_create_pool()
1024 pool->release_wq = create_singlethread_workqueue(pool->name); in z3fold_create_pool()
1025 if (!pool->release_wq) in z3fold_create_pool()
1027 if (z3fold_register_migration(pool)) in z3fold_create_pool()
1029 INIT_WORK(&pool->work, free_pages_work); in z3fold_create_pool()
1030 pool->ops = ops; in z3fold_create_pool()
1031 return pool; in z3fold_create_pool()
1034 destroy_workqueue(pool->release_wq); in z3fold_create_pool()
1036 destroy_workqueue(pool->compact_wq); in z3fold_create_pool()
1038 free_percpu(pool->unbuddied); in z3fold_create_pool()
1040 kmem_cache_destroy(pool->c_handle); in z3fold_create_pool()
1042 kfree(pool); in z3fold_create_pool()
1048 * z3fold_destroy_pool() - destroys an existing z3fold pool
1049 * @pool: the z3fold pool to be destroyed
1051 * The pool should be emptied before this function is called.
1053 static void z3fold_destroy_pool(struct z3fold_pool *pool) in z3fold_destroy_pool() argument
1055 kmem_cache_destroy(pool->c_handle); in z3fold_destroy_pool()
1058 * We need to destroy pool->compact_wq before pool->release_wq, in z3fold_destroy_pool()
1059 * as any pending work on pool->compact_wq will call in z3fold_destroy_pool()
1060 * queue_work(pool->release_wq, &pool->work). in z3fold_destroy_pool()
1066 destroy_workqueue(pool->compact_wq); in z3fold_destroy_pool()
1067 destroy_workqueue(pool->release_wq); in z3fold_destroy_pool()
1068 z3fold_unregister_migration(pool); in z3fold_destroy_pool()
1069 kfree(pool); in z3fold_destroy_pool()
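Nothing outside this file calls z3fold_create_pool() or z3fold_destroy_pool() directly; consumers such as zswap reach them through the zpool API using the "z3fold" type string. A hedged sketch of that lifecycle from a hypothetical kernel-side caller, with signatures as they were in the same era as this code (later kernels dropped the ops argument from zpool_create_pool()):

        #include <linux/gfp.h>
        #include <linux/printk.h>
        #include <linux/zpool.h>

        /* hypothetical kernel-side caller */
        static struct zpool *example_pool_up(void)
        {
                /* the "z3fold" type string routes to z3fold_zpool_create() */
                struct zpool *pool = zpool_create_pool("z3fold", "example",
                                                       GFP_KERNEL, NULL);

                if (!pool)
                        return NULL;
                pr_info("z3fold pool starts at %llu bytes\n",
                        (unsigned long long)zpool_get_total_size(pool));
                return pool;
        }

        static void example_pool_down(struct zpool *pool)
        {
                /* ends up in z3fold_destroy_pool(); the pool must be empty */
                zpool_destroy_pool(pool);
        }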
1074 * @pool: z3fold pool from which to allocate
1076 * @gfp: gfp flags used if the pool needs to grow
1079 * This function will attempt to find a free region in the pool large enough to
1082 * allocated and added to the pool to satisfy the request.
1085 * as z3fold pool pages.
1088 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
1091 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, in z3fold_alloc() argument
1110 zhdr = __z3fold_alloc(pool, size, can_sleep); in z3fold_alloc()
1125 atomic64_dec(&pool->pages_nr); in z3fold_alloc()
1140 spin_lock(&pool->stale_lock); in z3fold_alloc()
1141 zhdr = list_first_entry_or_null(&pool->stale, in z3fold_alloc()
1150 spin_unlock(&pool->stale_lock); in z3fold_alloc()
1154 spin_unlock(&pool->stale_lock); in z3fold_alloc()
1163 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); in z3fold_alloc()
1168 atomic64_inc(&pool->pages_nr); in z3fold_alloc()
1176 __SetPageMovable(page, pool->inode->i_mapping); in z3fold_alloc()
1180 __SetPageMovable(page, pool->inode->i_mapping); in z3fold_alloc()
1195 add_to_unbuddied(pool, zhdr); in z3fold_alloc()
1198 spin_lock(&pool->lock); in z3fold_alloc()
1203 list_add(&page->lru, &pool->lru); in z3fold_alloc()
1206 spin_unlock(&pool->lock); in z3fold_alloc()
1215 * @pool: pool in which the allocation resided
1223 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) in z3fold_free() argument
1241 spin_lock(&pool->lock); in z3fold_free()
1243 spin_unlock(&pool->lock); in z3fold_free()
1246 atomic64_dec(&pool->pages_nr); in z3fold_free()
1275 atomic64_dec(&pool->pages_nr); in z3fold_free()
1290 spin_lock(&pool->lock); in z3fold_free()
1292 spin_unlock(&pool->lock); in z3fold_free()
1301 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); in z3fold_free()
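End to end, storing a single object through this allocator looks like the hedged sketch below, again via the zpool wrappers at the bottom of the file. example_store() is hypothetical; the zpool calls are real:

        #include <linux/gfp.h>
        #include <linux/string.h>
        #include <linux/zpool.h>

        /* hedged sketch: store one buffer in a z3fold-backed zpool */
        static int example_store(struct zpool *pool, const void *src, size_t len)
        {
                unsigned long handle;
                void *dst;
                int ret;

                /* reaches z3fold_alloc(); fails if the object cannot
                 * fit in a z3fold page */
                ret = zpool_malloc(pool, len, GFP_KERNEL, &handle);
                if (ret)
                        return ret;

                /* z3fold_map()/z3fold_unmap(); a mapped middle buddy pins
                 * the page against compaction, so keep this window short */
                dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
                memcpy(dst, src, len);
                zpool_unmap_handle(pool, handle);

                zpool_free(pool, handle);       /* reaches z3fold_free() */
                return 0;
        }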
1306 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1307 * @pool: pool from which a page will attempt to be evicted
1320 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1321 * call the user-defined eviction handler with the pool and handle as
1341 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) in z3fold_reclaim_page() argument
1349 spin_lock(&pool->lock); in z3fold_reclaim_page()
1350 if (!pool->ops || !pool->ops->evict || retries == 0) { in z3fold_reclaim_page()
1351 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1355 if (list_empty(&pool->lru)) { in z3fold_reclaim_page()
1356 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1359 list_for_each_prev(pos, &pool->lru) { in z3fold_reclaim_page()
1363 * we pass over to the next page in the pool. in z3fold_reclaim_page()
1400 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1429 ret = pool->ops->evict(pool, middle_handle); in z3fold_reclaim_page()
1435 ret = pool->ops->evict(pool, first_handle); in z3fold_reclaim_page()
1441 ret = pool->ops->evict(pool, last_handle); in z3fold_reclaim_page()
1450 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1453 spin_lock(&pool->lock); in z3fold_reclaim_page()
1454 list_add(&page->lru, &pool->lru); in z3fold_reclaim_page()
1455 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1461 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1466 * free. Take the global pool lock then to be able in z3fold_reclaim_page()
1469 spin_lock(&pool->lock); in z3fold_reclaim_page()
1470 list_add(&page->lru, &pool->lru); in z3fold_reclaim_page()
1471 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1476 /* We started off locked so we need to lock the pool back */ in z3fold_reclaim_page()
1477 spin_lock(&pool->lock); in z3fold_reclaim_page()
1479 spin_unlock(&pool->lock); in z3fold_reclaim_page()
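The eviction handler that z3fold_reclaim_page() drives arrives through the zpool_ops supplied at pool creation (z3fold_zpool_evict() below forwards to pool->zpool_ops->evict). A hedged sketch of such a handler, modeled on what zswap does: it is invoked once per live handle in the page under reclaim, and is expected to write the object back, free the handle, and return 0 so the page can be released:

        #include <linux/zpool.h>

        /* hedged sketch of an eviction handler: write the object back to
         * its backing store, then drop the handle */
        static int example_evict(struct zpool *pool, unsigned long handle)
        {
                /* ... copy the data out via zpool_map_handle()/unmap ... */
                zpool_free(pool, handle);       /* reaches z3fold_free() */
                return 0;
        }

        static const struct zpool_ops example_ops = {
                .evict = example_evict,
        };

        /*
         * Create the pool with &example_ops; a later zpool_shrink(pool, 1,
         * NULL) then lands in z3fold_reclaim_page(pool, 8) via
         * z3fold_zpool_shrink(), as seen at line 1751 below.
         */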
1485 * @pool: pool in which the allocation resides
1493 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) in z3fold_map() argument
1535 * @pool: pool in which the allocation resides
1538 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) in z3fold_unmap() argument
1558 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1559 * @pool: pool whose size is being queried
1561 * Returns: size in pages of the given pool.
1563 static u64 z3fold_get_pool_size(struct z3fold_pool *pool) in z3fold_get_pool_size() argument
1565 return atomic64_read(&pool->pages_nr); in z3fold_get_pool_size()
1571 struct z3fold_pool *pool; in z3fold_page_isolate() local
1589 pool = zhdr_to_pool(zhdr); in z3fold_page_isolate()
1590 spin_lock(&pool->lock); in z3fold_page_isolate()
1595 spin_unlock(&pool->lock); in z3fold_page_isolate()
1610 struct z3fold_pool *pool; in z3fold_page_migrate() local
1618 pool = zhdr_to_pool(zhdr); in z3fold_page_migrate()
1657 spin_lock(&pool->lock); in z3fold_page_migrate()
1658 list_add(&newpage->lru, &pool->lru); in z3fold_page_migrate()
1659 spin_unlock(&pool->lock); in z3fold_page_migrate()
1663 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); in z3fold_page_migrate()
1673 struct z3fold_pool *pool; in z3fold_page_putback() local
1676 pool = zhdr_to_pool(zhdr); in z3fold_page_putback()
1683 atomic64_dec(&pool->pages_nr); in z3fold_page_putback()
1686 spin_lock(&pool->lock); in z3fold_page_putback()
1687 list_add(&page->lru, &pool->lru); in z3fold_page_putback()
1688 spin_unlock(&pool->lock); in z3fold_page_putback()
1702 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle) in z3fold_zpool_evict() argument
1704 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) in z3fold_zpool_evict()
1705 return pool->zpool_ops->evict(pool->zpool, handle); in z3fold_zpool_evict()
1718 struct z3fold_pool *pool; in z3fold_zpool_create() local
1720 pool = z3fold_create_pool(name, gfp, in z3fold_zpool_create()
1722 if (pool) { in z3fold_zpool_create()
1723 pool->zpool = zpool; in z3fold_zpool_create()
1724 pool->zpool_ops = zpool_ops; in z3fold_zpool_create()
1726 return pool; in z3fold_zpool_create()
1729 static void z3fold_zpool_destroy(void *pool) in z3fold_zpool_destroy() argument
1731 z3fold_destroy_pool(pool); in z3fold_zpool_destroy()
1734 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, in z3fold_zpool_malloc() argument
1737 return z3fold_alloc(pool, size, gfp, handle); in z3fold_zpool_malloc()
1739 static void z3fold_zpool_free(void *pool, unsigned long handle) in z3fold_zpool_free() argument
1741 z3fold_free(pool, handle); in z3fold_zpool_free()
1744 static int z3fold_zpool_shrink(void *pool, unsigned int pages, in z3fold_zpool_shrink() argument
1751 ret = z3fold_reclaim_page(pool, 8); in z3fold_zpool_shrink()
1763 static void *z3fold_zpool_map(void *pool, unsigned long handle, in z3fold_zpool_map() argument
1766 return z3fold_map(pool, handle); in z3fold_zpool_map()
1768 static void z3fold_zpool_unmap(void *pool, unsigned long handle) in z3fold_zpool_unmap() argument
1770 z3fold_unmap(pool, handle); in z3fold_zpool_unmap()
1773 static u64 z3fold_zpool_total_size(void *pool) in z3fold_zpool_total_size() argument
1775 return z3fold_get_pool_size(pool) * PAGE_SIZE; in z3fold_zpool_total_size()
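In the source these wrappers are collected into a zpool_driver and registered at module init. The reconstruction below is from memory of the v5.x file; the exact field set may differ by kernel version:

        static struct zpool_driver z3fold_zpool_driver = {
                .type =         "z3fold",
                .owner =        THIS_MODULE,
                .create =       z3fold_zpool_create,
                .destroy =      z3fold_zpool_destroy,
                .malloc =       z3fold_zpool_malloc,
                .free =         z3fold_zpool_free,
                .shrink =       z3fold_zpool_shrink,
                .map =          z3fold_zpool_map,
                .unmap =        z3fold_zpool_unmap,
                .total_size =   z3fold_zpool_total_size,
        };

        /* module init then calls zpool_register_driver(&z3fold_zpool_driver),
         * and module exit unregisters it */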