Lines matching "pool" (full-text search) in net/core/page_pool.c

Stat helper macros (CONFIG_PAGE_POOL_STATS enabled):
  35  #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
  37  #define recycle_stat_inc(pool, __stat) \
  39  	struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
  43  #define recycle_stat_add(pool, __stat, val) \
  45  	struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \

In page_pool_get_stats():
  64  * page_pool_get_stats() - fetch page pool stats
  65  * @pool: pool from which page was allocated
  74  bool page_pool_get_stats(const struct page_pool *pool,
  83  stats->alloc_stats.fast += pool->alloc_stats.fast;
  84  stats->alloc_stats.slow += pool->alloc_stats.slow;
  85  stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
  86  stats->alloc_stats.empty += pool->alloc_stats.empty;
  87  stats->alloc_stats.refill += pool->alloc_stats.refill;
  88  stats->alloc_stats.waive += pool->alloc_stats.waive;
  92  per_cpu_ptr(pool->recycle_stats, cpu);

Stat helper macro stubs (CONFIG_PAGE_POOL_STATS disabled, all no-ops):
 145  #define alloc_stat_inc(pool, __stat)
 146  #define recycle_stat_inc(pool, __stat)
 147  #define recycle_stat_add(pool, __stat, val)

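For orientation, here is a rough sketch of how a driver might consume these
counters. This is not code from this file: the function name is hypothetical,
and the <net/page_pool/helpers.h> include path is an assumption that depends on
kernel version (older kernels expose the same API from <net/page_pool.h>).

#include <net/page_pool/helpers.h>	/* assumed include path */

/* Sketch: dump the aggregate stats of one pool; page_pool_get_stats()
 * only exists when CONFIG_PAGE_POOL_STATS is enabled.
 */
static void my_dump_pool_stats(struct page_pool *pool)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = { };

	if (!page_pool_get_stats(pool, &stats))
		return;

	/* alloc_stats are pool-global; recycle_stats were summed per CPU above */
	pr_info("alloc fast=%llu slow=%llu empty=%llu refill=%llu waive=%llu\n",
		stats.alloc_stats.fast, stats.alloc_stats.slow,
		stats.alloc_stats.empty, stats.alloc_stats.refill,
		stats.alloc_stats.waive);
#endif
}
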
In page_pool_producer_lock():
 150  static bool page_pool_producer_lock(struct page_pool *pool)
 151  __acquires(&pool->ring.producer_lock)
 156  spin_lock(&pool->ring.producer_lock);
 158  spin_lock_bh(&pool->ring.producer_lock);

In page_pool_producer_unlock():
 163  static void page_pool_producer_unlock(struct page_pool *pool,
 165  __releases(&pool->ring.producer_lock)
 168  spin_unlock(&pool->ring.producer_lock);
 170  spin_unlock_bh(&pool->ring.producer_lock);

In page_pool_init():
 173  static int page_pool_init(struct page_pool *pool,
 178  memcpy(&pool->p, &params->fast, sizeof(pool->p));
 179  memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
 182  if (pool->p.flags & ~(PP_FLAG_ALL))
 185  if (pool->p.pool_size)
 186  ring_qsize = pool->p.pool_size;
 196  if (pool->p.flags & PP_FLAG_DMA_MAP) {
 197  if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
 198  (pool->p.dma_dir != DMA_BIDIRECTIONAL))
 202  if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
 206  if (!(pool->p.flags & PP_FLAG_DMA_MAP))
 209  if (!pool->p.max_len)
 212  /* pool->p.offset has to be set according to the address
 217  pool->has_init_callback = !!pool->slow.init_callback;
 220  pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
 221  if (!pool->recycle_stats)
 225  if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
 227  free_percpu(pool->recycle_stats);
 232  atomic_set(&pool->pages_state_release_cnt, 0);
 235  refcount_set(&pool->user_cnt, 1);
 237  if (pool->p.flags & PP_FLAG_DMA_MAP)
 238  get_device(pool->p.dev);

In page_pool_uninit():
 243  static void page_pool_uninit(struct page_pool *pool)
 245  ptr_ring_cleanup(&pool->ring, NULL);
 247  if (pool->p.flags & PP_FLAG_DMA_MAP)
 248  put_device(pool->p.dev);
 251  free_percpu(pool->recycle_stats);

In page_pool_create():
 256  * page_pool_create() - create a page pool.
 261  struct page_pool *pool;
 264  pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
 265  if (!pool)
 268  err = page_pool_init(pool, params);
 272  err = page_pool_list(pool);
 276  return pool;
 279  page_pool_uninit(pool);
 282  kfree(pool);

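A minimal sketch of how an RX queue might create such a pool. The parameter
values and the function name are illustrative assumptions, and the include path
depends on kernel version; the field choices mirror the checks visible in
page_pool_init() above (dma_dir must be DMA_FROM_DEVICE or DMA_BIDIRECTIONAL,
and PP_FLAG_DMA_SYNC_DEV additionally requires PP_FLAG_DMA_MAP and a non-zero
max_len).

#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>	/* assumed path; older kernels: <net/page_pool.h> */

/* Sketch: one order-0 pool per RX ring; the pool handles DMA mapping and
 * the device-direction sync on recycle.
 */
static struct page_pool *my_rx_create_pool(struct device *dev, int numa_node)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 1024,		/* sizes the recycle ptr_ring */
		.nid		= numa_node,
		.dev		= dev,		/* needed for PP_FLAG_DMA_MAP */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* needed for PP_FLAG_DMA_SYNC_DEV */
		.offset		= 0,
	};
	struct page_pool *pool = page_pool_create(&pp_params);

	/* page_pool_create() reports failure via ERR_PTR(), not NULL */
	return IS_ERR(pool) ? NULL : pool;
}
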
 287  static void page_pool_return_page(struct page_pool *pool, struct page *page);

In page_pool_refill_alloc_cache():
 290  static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
 292  struct ptr_ring *r = &pool->ring;
 298  alloc_stat_inc(pool, empty);
 306  pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
 308  /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
 319  pool->alloc.cache[pool->alloc.count++] = page;
 326  page_pool_return_page(pool, page);
 327  alloc_stat_inc(pool, waive);
 331  } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 334  if (likely(pool->alloc.count > 0)) {
 335  page = pool->alloc.cache[--pool->alloc.count];
 336  alloc_stat_inc(pool, refill);

In __page_pool_get_cached():
 343  static struct page *__page_pool_get_cached(struct page_pool *pool)
 348  if (likely(pool->alloc.count)) {
 350  page = pool->alloc.cache[--pool->alloc.count];
 351  alloc_stat_inc(pool, fast);
 353  page = page_pool_refill_alloc_cache(pool);

In page_pool_dma_sync_for_device():
 359  static void page_pool_dma_sync_for_device(struct page_pool *pool,
 365  dma_sync_size = min(dma_sync_size, pool->p.max_len);
 366  dma_sync_single_range_for_device(pool->p.dev, dma_addr,
 367  pool->p.offset, dma_sync_size,
 368  pool->p.dma_dir);

In page_pool_dma_map():
 371  static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 378  * This mapping is kept for lifetime of page, until leaving pool.
 380  dma = dma_map_page_attrs(pool->p.dev, page, 0,
 381  (PAGE_SIZE << pool->p.order),
 382  pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
 384  if (dma_mapping_error(pool->p.dev, dma))
 390  if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 391  page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
 397  dma_unmap_page_attrs(pool->p.dev, dma,
 398  PAGE_SIZE << pool->p.order, pool->p.dma_dir,

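Note that the pool only syncs buffers for the device; before the CPU parses a
received frame, the driver still syncs the used region back for the CPU. A
hedged sketch of that counterpart (the function name and the rx_offset/len
parameters are driver-side assumptions, not part of this file):

#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>	/* assumed include path */

/* Sketch: make the bytes the NIC just wrote visible to the CPU. */
static void my_rx_sync_for_cpu(struct page_pool *pool, struct device *dev,
			       struct page *page, u32 rx_offset, u32 len)
{
	dma_addr_t dma = page_pool_get_dma_addr(page);

	dma_sync_single_range_for_cpu(dev, dma, rx_offset, len,
				      page_pool_get_dma_dir(pool));
}
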
In page_pool_set_pp_info():
 403  static void page_pool_set_pp_info(struct page_pool *pool,
 406  page->pp = pool;
 416  if (pool->has_init_callback)
 417  pool->slow.init_callback(page, pool->slow.init_arg);

In __page_pool_alloc_page_order():
 426  static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 432  page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
 436  if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
 437  unlikely(!page_pool_dma_map(pool, page))) {
 442  alloc_stat_inc(pool, slow_high_order);
 443  page_pool_set_pp_info(pool, page);
 446  pool->pages_state_hold_cnt++;
 447  trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

In __page_pool_alloc_pages_slow():
 453  static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 457  unsigned int pp_flags = pool->p.flags;
 458  unsigned int pp_order = pool->p.order;
 464  return __page_pool_alloc_page_order(pool, gfp);
 467  if (unlikely(pool->alloc.count > 0))
 468  return pool->alloc.cache[--pool->alloc.count];
 471  memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
 473  nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
 474  pool->alloc.cache);
 482  page = pool->alloc.cache[i];
 484  unlikely(!page_pool_dma_map(pool, page))) {
 489  page_pool_set_pp_info(pool, page);
 490  pool->alloc.cache[pool->alloc.count++] = page;
 492  pool->pages_state_hold_cnt++;
 493  trace_page_pool_state_hold(pool, page,
 494  pool->pages_state_hold_cnt);
 498  if (likely(pool->alloc.count > 0)) {
 499  page = pool->alloc.cache[--pool->alloc.count];
 500  alloc_stat_inc(pool, slow);

In page_pool_alloc_pages():
 512  struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
 517  page = __page_pool_get_cached(pool);
 522  page = __page_pool_alloc_pages_slow(pool, gfp);

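A hedged sketch of the typical caller, an RX refill routine. The struct
my_rx_desc and the function name are hypothetical; page_pool_dev_alloc_pages()
is the small wrapper that calls page_pool_alloc_pages() with default GFP flags.

#include <net/page_pool/helpers.h>	/* assumed include path */

struct my_rx_desc {			/* hypothetical descriptor shadow */
	struct page *page;
	dma_addr_t dma_addr;
};

/* Sketch: post one fresh buffer to an RX ring slot. */
static int my_rx_refill_one(struct page_pool *pool, struct my_rx_desc *desc)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (unlikely(!page))
		return -ENOMEM;

	/* With PP_FLAG_DMA_MAP the pool has already mapped the page */
	desc->page = page;
	desc->dma_addr = page_pool_get_dma_addr(page);
	return 0;
}
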
In page_pool_inflight():
 532  s32 page_pool_inflight(const struct page_pool *pool, bool strict)
 534  u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
 535  u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
 541  trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);

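The lines elided between 535 and 541 derive inflight from the two counters; the
idea (a sketch of the arithmetic, not a verbatim copy of those lines) is that
both are free-running u32s, so the signed cast of their difference stays
correct even across wraparound:

/* Sketch: pages handed out (hold) but not yet returned (release). */
static s32 my_inflight(u32 hold_cnt, u32 release_cnt)
{
	return (s32)(hold_cnt - release_cnt);
}
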
In __page_pool_release_page_dma():
 552  void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
 556  if (!(pool->p.flags & PP_FLAG_DMA_MAP))
 564  /* When page is unmapped, it cannot be returned to our pool */
 565  dma_unmap_page_attrs(pool->p.dev, dma,
 566  PAGE_SIZE << pool->p.order, pool->p.dma_dir,

In page_pool_return_page():
 576  void page_pool_return_page(struct page_pool *pool, struct page *page)
 580  __page_pool_release_page_dma(pool, page);
 584  /* This may be the last page returned, releasing the pool, so
 585  * it is not safe to reference pool afterwards.
 587  count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
 588  trace_page_pool_state_release(pool, page, count);
 591  /* An optimization would be to call __free_pages(page, pool->p.order)

In page_pool_recycle_in_ring():
 597  static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
 602  ret = ptr_ring_produce(&pool->ring, page);
 604  ret = ptr_ring_produce_bh(&pool->ring, page);
 607  recycle_stat_inc(pool, ring);

In page_pool_recycle_in_cache():
 620  struct page_pool *pool)
 622  if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
 623  recycle_stat_inc(pool, cache_full);
 628  pool->alloc.cache[pool->alloc.count++] = page;
 629  recycle_stat_inc(pool, cached);

In __page_pool_put_page():
 635  * the configured size min(dma_sync_size, pool->max_len).
 640  __page_pool_put_page(struct page_pool *pool, struct page *page,
 657  if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 658  page_pool_dma_sync_for_device(pool, page,
 662  page_pool_recycle_in_cache(page, pool))
 681  recycle_stat_inc(pool, released_refcnt);
 682  page_pool_return_page(pool, page);

In page_pool_put_unrefed_page():
 687  void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
 690  page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 691  if (page && !page_pool_recycle_in_ring(pool, page)) {
 693  recycle_stat_inc(pool, ring_full);
 694  page_pool_return_page(pool, page);

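Drivers usually reach this path through the page_pool_put_full_page() or
page_pool_recycle_direct() wrappers rather than by calling it directly. A
minimal sketch (the surrounding function and the in_napi_poll flag are
assumptions):

#include <net/page_pool/helpers.h>	/* assumed include path */

/* Sketch: give back an RX buffer we are not passing up the stack.
 * allow_direct may only be true from the pool's own NAPI/softirq context,
 * where the page can go straight back into pool->alloc.cache.
 */
static void my_rx_drop_buffer(struct page_pool *pool, struct page *page,
			      bool in_napi_poll)
{
	/* "full page" = let the pool sync up to its configured max_len */
	page_pool_put_full_page(pool, page, in_napi_poll);
}
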
In page_pool_put_page_bulk():
 701  * @pool: pool from which pages were allocated
 714  void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 727  page = __page_pool_put_page(pool, page, -1, false);
 737  in_softirq = page_pool_producer_lock(pool);
 739  if (__ptr_ring_produce(&pool->ring, data[i])) {
 741  recycle_stat_inc(pool, ring_full);
 745  recycle_stat_add(pool, ring, i);
 746  page_pool_producer_unlock(pool, in_softirq);
 756  page_pool_return_page(pool, data[i]);

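The bulk variant amortizes the ring's producer lock over a whole batch; it is
what the XDP bulk-return path builds on. A hedged sketch of calling it directly
(the batch gathering around it is hypothetical):

#include <net/page_pool/helpers.h>	/* assumed include path */

/* Sketch: return a gathered batch of pages with one producer-lock round trip. */
static void my_flush_page_batch(struct page_pool *pool,
				struct page **batch, int count)
{
	if (count > 0)
		page_pool_put_page_bulk(pool, (void **)batch, count);
}
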
In page_pool_drain_frag():
 760  static struct page *page_pool_drain_frag(struct page_pool *pool,
 763  long drain_count = BIAS_MAX - pool->frag_users;
 770  if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 771  page_pool_dma_sync_for_device(pool, page, -1);
 776  page_pool_return_page(pool, page);

In page_pool_free_frag():
 780  static void page_pool_free_frag(struct page_pool *pool)
 782  long drain_count = BIAS_MAX - pool->frag_users;
 783  struct page *page = pool->frag_page;
 785  pool->frag_page = NULL;
 790  page_pool_return_page(pool, page);

In page_pool_alloc_frag():
 793  struct page *page_pool_alloc_frag(struct page_pool *pool,
 797  unsigned int max_size = PAGE_SIZE << pool->p.order;
 798  struct page *page = pool->frag_page;
 804  *offset = pool->frag_offset;
 807  page = page_pool_drain_frag(pool, page);
 809  alloc_stat_inc(pool, fast);
 815  page = page_pool_alloc_pages(pool, gfp);
 817  pool->frag_page = NULL;
 821  pool->frag_page = page;
 824  pool->frag_users = 1;
 826  pool->frag_offset = size;
 831  pool->frag_users++;
 832  pool->frag_offset = *offset + size;
 833  alloc_stat_inc(pool, fast);

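A sketch of the fragment API from the caller's side, useful when buffers are
much smaller than a page. The 2048-byte size and the function name are
illustrative assumptions; depending on kernel version the pool may also need to
be created with a page-fragment flag.

#include <net/page_pool/helpers.h>	/* assumed include path */

/* Sketch: carve a 2KB slice out of a (possibly shared) pool page.
 * On success *offset says where this caller's slice begins.
 */
static struct page *my_rx_alloc_frag(struct page_pool *pool,
				     unsigned int *offset)
{
	/* page_pool_dev_alloc_frag() wraps page_pool_alloc_frag()
	 * with default GFP flags.
	 */
	return page_pool_dev_alloc_frag(pool, offset, 2048);
}
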
In page_pool_empty_ring():
 838  static void page_pool_empty_ring(struct page_pool *pool)
 843  while ((page = ptr_ring_consume_bh(&pool->ring))) {
 849  page_pool_return_page(pool, page);

In __page_pool_destroy():
 853  static void __page_pool_destroy(struct page_pool *pool)
 855  if (pool->disconnect)
 856  pool->disconnect(pool);
 858  page_pool_unlist(pool);
 859  page_pool_uninit(pool);
 860  kfree(pool);

In page_pool_empty_alloc_cache_once():
 863  static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
 867  if (pool->destroy_cnt)
 874  while (pool->alloc.count) {
 875  page = pool->alloc.cache[--pool->alloc.count];
 876  page_pool_return_page(pool, page);

In page_pool_scrub():
 880  static void page_pool_scrub(struct page_pool *pool)
 882  page_pool_empty_alloc_cache_once(pool);
 883  pool->destroy_cnt++;
 888  page_pool_empty_ring(pool);

In page_pool_release():
 891  static int page_pool_release(struct page_pool *pool)
 895  page_pool_scrub(pool);
 896  inflight = page_pool_inflight(pool, true);
 898  __page_pool_destroy(pool);

In page_pool_release_retry():
 906  struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
 910  inflight = page_pool_release(pool);
 915  netdev = READ_ONCE(pool->slow.netdev);
 916  if (time_after_eq(jiffies, pool->defer_warn) &&
 918  int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
 920  pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
 921  __func__, pool->user.id, inflight, sec);
 922  pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
 926  schedule_delayed_work(&pool->release_dw, DEFER_TIME);

In page_pool_use_xdp_mem():
 929  void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 932  refcount_inc(&pool->user_cnt);
 933  pool->disconnect = disconnect;
 934  pool->xdp_mem_id = mem->id;

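Drivers do not call page_pool_use_xdp_mem() themselves; it is reached when the
pool is registered as the memory model of an XDP RX queue. A hedged sketch of
that registration (the function name and parameters are placeholders; the
xdp_rxq_info_reg() signature shown matches recent kernels):

#include <net/xdp.h>

/* Sketch: tie an RX queue's xdp_rxq_info to the pool so XDP frame
 * returns are routed back into it.
 */
static int my_rx_register_xdp(struct xdp_rxq_info *xdp_rxq,
			      struct net_device *netdev, u32 queue_index,
			      unsigned int napi_id, struct page_pool *pool)
{
	int err = xdp_rxq_info_reg(xdp_rxq, netdev, queue_index, napi_id);

	if (err)
		return err;

	/* Ends up taking the extra pool->user_cnt reference above */
	return xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_POOL, pool);
}
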
In page_pool_unlink_napi():
 937  void page_pool_unlink_napi(struct page_pool *pool)
 939  if (!pool->p.napi)
 943  * pool and NAPI are unlinked when NAPI is disabled.
 945  WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
 946  READ_ONCE(pool->p.napi->list_owner) != -1);
 948  WRITE_ONCE(pool->p.napi, NULL);

In page_pool_destroy():
 952  void page_pool_destroy(struct page_pool *pool)
 954  if (!pool)
 957  if (!page_pool_put(pool))
 960  page_pool_unlink_napi(pool);
 961  page_pool_free_frag(pool);
 963  if (!page_pool_release(pool))
 966  page_pool_detached(pool);
 967  pool->defer_start = jiffies;
 968  pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
 970  INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
 971  schedule_delayed_work(&pool->release_dw, DEFER_TIME);

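On the driver side the key point is ordering: disable NAPI, drain the hardware
ring, unregister the XDP memory model, and only then call page_pool_destroy(),
which may defer the actual free until every in-flight page has been returned.
A sketch under those assumptions (struct my_rx_queue is hypothetical):

#include <net/xdp.h>
#include <net/page_pool/helpers.h>	/* assumed include path */

struct my_rx_queue {			/* hypothetical driver structure */
	struct xdp_rxq_info xdp_rxq;
	struct page_pool *pool;
};

/* Sketch: tear an RX queue down in the order the pool expects;
 * NAPI is assumed already disabled and the HW ring already drained.
 */
static void my_rx_queue_destroy(struct my_rx_queue *rxq)
{
	xdp_rxq_info_unreg(&rxq->xdp_rxq);	/* drops the xdp_mem user ref */
	page_pool_destroy(rxq->pool);		/* may defer via release_dw above */
	rxq->pool = NULL;
}
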
In page_pool_update_nid():
 976  void page_pool_update_nid(struct page_pool *pool, int new_nid)
 980  trace_page_pool_update_nid(pool, new_nid);
 981  pool->p.nid = new_nid;
 983  /* Flush pool alloc cache, as refill will check NUMA node */
 984  while (pool->alloc.count) {
 985  page = pool->alloc.cache[--pool->alloc.count];
 986  page_pool_return_page(pool, page);

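A typical caller invokes this from its NAPI poll loop when the RX interrupt may
have moved to another NUMA node. A minimal sketch, modeled on that pattern
rather than copied from any driver; page_pool_nid_changed() is the small helper
that calls page_pool_update_nid() only when the node actually changed:

#include <linux/topology.h>
#include <net/page_pool/helpers.h>	/* assumed include path */

/* Sketch: keep future refills local to whichever node now runs the poll loop. */
static void my_rx_check_numa(struct page_pool *pool)
{
	page_pool_nid_changed(pool, numa_mem_id());
}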