Lines Matching full:pool
46 #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++) argument
48 #define recycle_stat_inc(pool, __stat) \ argument
50 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
54 #define recycle_stat_add(pool, __stat, val) \ argument
56 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
75 * page_pool_get_stats() - fetch page pool stats
76 * @pool: pool from which page was allocated
85 bool page_pool_get_stats(const struct page_pool *pool, in page_pool_get_stats() argument
94 stats->alloc_stats.fast += pool->alloc_stats.fast; in page_pool_get_stats()
95 stats->alloc_stats.slow += pool->alloc_stats.slow; in page_pool_get_stats()
96 stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order; in page_pool_get_stats()
97 stats->alloc_stats.empty += pool->alloc_stats.empty; in page_pool_get_stats()
98 stats->alloc_stats.refill += pool->alloc_stats.refill; in page_pool_get_stats()
99 stats->alloc_stats.waive += pool->alloc_stats.waive; in page_pool_get_stats()
103 per_cpu_ptr(pool->recycle_stats, cpu); in page_pool_get_stats()
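page_pool_get_stats() sums the pool's software allocation counters and the per-CPU recycle counters into a caller-supplied struct page_pool_stats. A minimal driver-side sketch, assuming CONFIG_PAGE_POOL_STATS=y and a hypothetical rxq->page_pool pointer:

	struct page_pool_stats stats = { };

	/* fills in stats and returns true when the counters were aggregated */
	if (page_pool_get_stats(rxq->page_pool, &stats)) {
		u64 alloc_fast = stats.alloc_stats.fast;
		u64 recycled   = stats.recycle_stats.cached +
				 stats.recycle_stats.ring;
		/* report alloc_fast / recycled via ethtool, debugfs, etc. */
	}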
161 static bool page_pool_producer_lock(struct page_pool *pool) in page_pool_producer_lock() argument
162 __acquires(&pool->ring.producer_lock) in page_pool_producer_lock()
167 spin_lock(&pool->ring.producer_lock); in page_pool_producer_lock()
169 spin_lock_bh(&pool->ring.producer_lock); in page_pool_producer_lock()
174 static void page_pool_producer_unlock(struct page_pool *pool, in page_pool_producer_unlock() argument
176 __releases(&pool->ring.producer_lock) in page_pool_producer_unlock()
179 spin_unlock(&pool->ring.producer_lock); in page_pool_producer_unlock()
181 spin_unlock_bh(&pool->ring.producer_lock); in page_pool_producer_unlock()
193 static int page_pool_init(struct page_pool *pool, in page_pool_init() argument
203 memcpy(&pool->p, &params->fast, sizeof(pool->p)); in page_pool_init()
204 memcpy(&pool->slow, &params->slow, sizeof(pool->slow)); in page_pool_init()
206 pool->cpuid = cpuid; in page_pool_init()
207 pool->dma_sync_for_cpu = true; in page_pool_init()
210 if (pool->slow.flags & ~PP_FLAG_ALL) in page_pool_init()
213 if (pool->p.pool_size) in page_pool_init()
214 ring_qsize = min(pool->p.pool_size, 16384); in page_pool_init()
220 if (pool->slow.flags & PP_FLAG_DMA_MAP) { in page_pool_init()
221 if ((pool->p.dma_dir != DMA_FROM_DEVICE) && in page_pool_init()
222 (pool->p.dma_dir != DMA_BIDIRECTIONAL)) in page_pool_init()
225 pool->dma_map = true; in page_pool_init()
228 if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) { in page_pool_init()
232 if (!(pool->slow.flags & PP_FLAG_DMA_MAP)) in page_pool_init()
235 if (!pool->p.max_len) in page_pool_init()
238 pool->dma_sync = true; in page_pool_init()
240 /* pool->p.offset has to be set according to the address in page_pool_init()
245 pool->has_init_callback = !!pool->slow.init_callback; in page_pool_init()
248 if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) { in page_pool_init()
249 pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats); in page_pool_init()
250 if (!pool->recycle_stats) in page_pool_init()
253 /* For system page pool instance we use a singular stats object in page_pool_init()
255 * (also percpu) page pool instance. in page_pool_init()
257 pool->recycle_stats = &pp_system_recycle_stats; in page_pool_init()
258 pool->system = true; in page_pool_init()
262 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) { in page_pool_init()
264 if (!pool->system) in page_pool_init()
265 free_percpu(pool->recycle_stats); in page_pool_init()
270 atomic_set(&pool->pages_state_release_cnt, 0); in page_pool_init()
273 refcount_set(&pool->user_cnt, 1); in page_pool_init()
275 xa_init_flags(&pool->dma_mapped, XA_FLAGS_ALLOC1); in page_pool_init()
277 if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) { in page_pool_init()
278 netdev_assert_locked(pool->slow.netdev); in page_pool_init()
279 rxq = __netif_get_rx_queue(pool->slow.netdev, in page_pool_init()
280 pool->slow.queue_idx); in page_pool_init()
281 pool->mp_priv = rxq->mp_params.mp_priv; in page_pool_init()
282 pool->mp_ops = rxq->mp_params.mp_ops; in page_pool_init()
285 if (pool->mp_ops) { in page_pool_init()
286 if (!pool->dma_map || !pool->dma_sync) { in page_pool_init()
291 if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) { in page_pool_init()
296 err = pool->mp_ops->init(pool); in page_pool_init()
304 } else if (pool->p.order > MAX_PAGE_ORDER) { in page_pool_init()
312 ptr_ring_cleanup(&pool->ring, NULL); in page_pool_init()
313 xa_destroy(&pool->dma_mapped); in page_pool_init()
315 if (!pool->system) in page_pool_init()
316 free_percpu(pool->recycle_stats); in page_pool_init()
321 static void page_pool_uninit(struct page_pool *pool) in page_pool_uninit() argument
323 ptr_ring_cleanup(&pool->ring, NULL); in page_pool_uninit()
324 xa_destroy(&pool->dma_mapped); in page_pool_uninit()
327 if (!pool->system) in page_pool_uninit()
328 free_percpu(pool->recycle_stats); in page_pool_uninit()
333 * page_pool_create_percpu() - create a page pool for a given cpu.
340 struct page_pool *pool; in page_pool_create_percpu() local
343 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid); in page_pool_create_percpu()
344 if (!pool) in page_pool_create_percpu()
347 err = page_pool_init(pool, params, cpuid); in page_pool_create_percpu()
351 err = page_pool_list(pool); in page_pool_create_percpu()
355 return pool; in page_pool_create_percpu()
358 page_pool_uninit(pool); in page_pool_create_percpu()
361 kfree(pool); in page_pool_create_percpu()
367 * page_pool_create() - create a page pool
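page_pool_create() copies the caller's page_pool_params into the pool, validates the flags and DMA settings, and sizes the ptr_ring (capped at 16384 entries). A hedged setup sketch; the device pointer and ring size below are illustrative, not taken from this file:

	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,			/* order-0 pages */
		.pool_size	= 256,			/* ptr_ring entries */
		.nid		= NUMA_NO_NODE,
		.dev		= &pdev->dev,		/* device doing the DMA mapping */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,		/* max area synced for device */
		.offset		= 0,			/* headroom before the HW buffer */
	};
	struct page_pool *pool;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);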
376 static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem);
378 static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache() argument
380 struct ptr_ring *r = &pool->ring; in page_pool_refill_alloc_cache()
386 alloc_stat_inc(pool, empty); in page_pool_refill_alloc_cache()
394 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid; in page_pool_refill_alloc_cache()
396 /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */ in page_pool_refill_alloc_cache()
407 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_refill_alloc_cache()
414 page_pool_return_netmem(pool, netmem); in page_pool_refill_alloc_cache()
415 alloc_stat_inc(pool, waive); in page_pool_refill_alloc_cache()
419 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); in page_pool_refill_alloc_cache()
422 if (likely(pool->alloc.count > 0)) { in page_pool_refill_alloc_cache()
423 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
424 alloc_stat_inc(pool, refill); in page_pool_refill_alloc_cache()
431 static netmem_ref __page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached() argument
436 if (likely(pool->alloc.count)) { in __page_pool_get_cached()
438 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
439 alloc_stat_inc(pool, fast); in __page_pool_get_cached()
441 netmem = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
447 static void __page_pool_dma_sync_for_device(const struct page_pool *pool, in __page_pool_dma_sync_for_device() argument
454 dma_sync_size = min(dma_sync_size, pool->p.max_len); in __page_pool_dma_sync_for_device()
455 __dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset, in __page_pool_dma_sync_for_device()
456 dma_sync_size, pool->p.dma_dir); in __page_pool_dma_sync_for_device()
461 page_pool_dma_sync_for_device(const struct page_pool *pool, in page_pool_dma_sync_for_device() argument
465 if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) { in page_pool_dma_sync_for_device()
468 if (pool->dma_sync) in page_pool_dma_sync_for_device()
469 __page_pool_dma_sync_for_device(pool, netmem, in page_pool_dma_sync_for_device()
475 static int page_pool_register_dma_index(struct page_pool *pool, in page_pool_register_dma_index() argument
485 err = xa_alloc(&pool->dma_mapped, &id, netmem_to_page(netmem), in page_pool_register_dma_index()
488 err = xa_alloc_bh(&pool->dma_mapped, &id, netmem_to_page(netmem), in page_pool_register_dma_index()
500 static int page_pool_release_dma_index(struct page_pool *pool, in page_pool_release_dma_index() argument
514 old = xa_cmpxchg(&pool->dma_mapped, id, page, NULL, 0); in page_pool_release_dma_index()
516 old = xa_cmpxchg_bh(&pool->dma_mapped, id, page, NULL, 0); in page_pool_release_dma_index()
525 static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem, gfp_t gfp) in page_pool_dma_map() argument
533 * This mapping is kept for lifetime of page, until leaving pool. in page_pool_dma_map()
535 dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0, in page_pool_dma_map()
536 (PAGE_SIZE << pool->p.order), pool->p.dma_dir, in page_pool_dma_map()
539 if (dma_mapping_error(pool->p.dev, dma)) in page_pool_dma_map()
547 err = page_pool_register_dma_index(pool, netmem, gfp); in page_pool_dma_map()
551 page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len); in page_pool_dma_map()
558 dma_unmap_page_attrs(pool->p.dev, dma, in page_pool_dma_map()
559 PAGE_SIZE << pool->p.order, pool->p.dma_dir, in page_pool_dma_map()
564 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, in __page_pool_alloc_page_order() argument
570 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
574 if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page), gfp))) { in __page_pool_alloc_page_order()
579 alloc_stat_inc(pool, slow_high_order); in __page_pool_alloc_page_order()
580 page_pool_set_pp_info(pool, page_to_netmem(page)); in __page_pool_alloc_page_order()
583 pool->pages_state_hold_cnt++; in __page_pool_alloc_page_order()
584 trace_page_pool_state_hold(pool, page_to_netmem(page), in __page_pool_alloc_page_order()
585 pool->pages_state_hold_cnt); in __page_pool_alloc_page_order()
590 static noinline netmem_ref __page_pool_alloc_netmems_slow(struct page_pool *pool, in __page_pool_alloc_netmems_slow() argument
594 unsigned int pp_order = pool->p.order; in __page_pool_alloc_netmems_slow()
595 bool dma_map = pool->dma_map; in __page_pool_alloc_netmems_slow()
607 return page_to_netmem(__page_pool_alloc_page_order(pool, gfp)); in __page_pool_alloc_netmems_slow()
610 if (unlikely(pool->alloc.count > 0)) in __page_pool_alloc_netmems_slow()
611 return pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_netmems_slow()
614 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); in __page_pool_alloc_netmems_slow()
616 nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk, in __page_pool_alloc_netmems_slow()
617 (struct page **)pool->alloc.cache); in __page_pool_alloc_netmems_slow()
625 netmem = pool->alloc.cache[i]; in __page_pool_alloc_netmems_slow()
626 if (dma_map && unlikely(!page_pool_dma_map(pool, netmem, gfp))) { in __page_pool_alloc_netmems_slow()
631 page_pool_set_pp_info(pool, netmem); in __page_pool_alloc_netmems_slow()
632 pool->alloc.cache[pool->alloc.count++] = netmem; in __page_pool_alloc_netmems_slow()
634 pool->pages_state_hold_cnt++; in __page_pool_alloc_netmems_slow()
635 trace_page_pool_state_hold(pool, netmem, in __page_pool_alloc_netmems_slow()
636 pool->pages_state_hold_cnt); in __page_pool_alloc_netmems_slow()
640 if (likely(pool->alloc.count > 0)) { in __page_pool_alloc_netmems_slow()
641 netmem = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_netmems_slow()
642 alloc_stat_inc(pool, slow); in __page_pool_alloc_netmems_slow()
654 netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_netmems() argument
659 netmem = __page_pool_get_cached(pool); in page_pool_alloc_netmems()
664 if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops) in page_pool_alloc_netmems()
665 netmem = pool->mp_ops->alloc_netmems(pool, gfp); in page_pool_alloc_netmems()
667 netmem = __page_pool_alloc_netmems_slow(pool, gfp); in page_pool_alloc_netmems()
673 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages() argument
675 return netmem_to_page(page_pool_alloc_netmems(pool, gfp)); in page_pool_alloc_pages()
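page_pool_alloc_netmems() tries the per-pool alloc cache first, then refills it from the ptr_ring, and only then falls into the slow bulk-allocation path; page_pool_alloc_pages() is the struct page wrapper. A hedged RX-refill fragment:

	struct page *page;
	dma_addr_t dma;

	/* NAPI/softirq context: atomic allocation, no OOM warning */
	page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	/* with PP_FLAG_DMA_MAP the page is already mapped for the device */
	dma = page_pool_get_dma_addr(page);
	/* post dma + pool offset to the hardware RX descriptor ... */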
684 s32 page_pool_inflight(const struct page_pool *pool, bool strict) in page_pool_inflight() argument
686 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); in page_pool_inflight()
687 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); in page_pool_inflight()
693 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); in page_pool_inflight()
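The inflight figure is simply pages handed out minus pages returned, e.g. hold_cnt = 1024 and release_cnt = 1000 means 24 pages are still held by the driver or the stack; doing the subtraction on the u32 counters keeps the result correct across wraparound.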
703 void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem) in page_pool_set_pp_info() argument
705 netmem_set_pp(netmem, pool); in page_pool_set_pp_info()
715 if (pool->has_init_callback) in page_pool_set_pp_info()
716 pool->slow.init_callback(netmem, pool->slow.init_arg); in page_pool_set_pp_info()
725 static __always_inline void __page_pool_release_netmem_dma(struct page_pool *pool, in __page_pool_release_netmem_dma() argument
730 if (!pool->dma_map) in __page_pool_release_netmem_dma()
736 if (page_pool_release_dma_index(pool, netmem)) in __page_pool_release_netmem_dma()
741 /* When page is unmapped, it cannot be returned to our pool */ in __page_pool_release_netmem_dma()
742 dma_unmap_page_attrs(pool->p.dev, dma, in __page_pool_release_netmem_dma()
743 PAGE_SIZE << pool->p.order, pool->p.dma_dir, in __page_pool_release_netmem_dma()
753 static void page_pool_return_netmem(struct page_pool *pool, netmem_ref netmem) in page_pool_return_netmem() argument
759 if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops) in page_pool_return_netmem()
760 put = pool->mp_ops->release_netmem(pool, netmem); in page_pool_return_netmem()
762 __page_pool_release_netmem_dma(pool, netmem); in page_pool_return_netmem()
764 /* This may be the last page returned, releasing the pool, so in page_pool_return_netmem()
765 * it is not safe to reference pool afterwards. in page_pool_return_netmem()
767 count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt); in page_pool_return_netmem()
768 trace_page_pool_state_release(pool, netmem, count); in page_pool_return_netmem()
774 /* An optimization would be to call __free_pages(page, pool->p.order) in page_pool_return_netmem()
780 static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem) in page_pool_recycle_in_ring() argument
785 in_softirq = page_pool_producer_lock(pool); in page_pool_recycle_in_ring()
786 ret = !__ptr_ring_produce(&pool->ring, (__force void *)netmem); in page_pool_recycle_in_ring()
788 recycle_stat_inc(pool, ring); in page_pool_recycle_in_ring()
789 page_pool_producer_unlock(pool, in_softirq); in page_pool_recycle_in_ring()
800 struct page_pool *pool) in page_pool_recycle_in_cache() argument
802 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) { in page_pool_recycle_in_cache()
803 recycle_stat_inc(pool, cache_full); in page_pool_recycle_in_cache()
808 pool->alloc.cache[pool->alloc.count++] = netmem; in page_pool_recycle_in_cache()
809 recycle_stat_inc(pool, cached); in page_pool_recycle_in_cache()
821 * If pool->dma_sync is set, we'll try to sync the DMA area for
822 * the configured size min(dma_sync_size, pool->max_len).
827 __page_pool_put_page(struct page_pool *pool, netmem_ref netmem, in __page_pool_put_page() argument
844 page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); in __page_pool_put_page()
846 if (allow_direct && page_pool_recycle_in_cache(netmem, pool)) in __page_pool_put_page()
866 recycle_stat_inc(pool, released_refcnt); in __page_pool_put_page()
867 page_pool_return_netmem(pool, netmem); in __page_pool_put_page()
872 static bool page_pool_napi_local(const struct page_pool *pool) in page_pool_napi_local() argument
891 if (READ_ONCE(pool->cpuid) == cpuid) in page_pool_napi_local()
894 napi = READ_ONCE(pool->p.napi); in page_pool_napi_local()
899 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem, in page_pool_put_unrefed_netmem() argument
903 allow_direct = page_pool_napi_local(pool); in page_pool_put_unrefed_netmem()
905 netmem = __page_pool_put_page(pool, netmem, dma_sync_size, in page_pool_put_unrefed_netmem()
907 if (netmem && !page_pool_recycle_in_ring(pool, netmem)) { in page_pool_put_unrefed_netmem()
909 recycle_stat_inc(pool, ring_full); in page_pool_put_unrefed_netmem()
910 page_pool_return_netmem(pool, netmem); in page_pool_put_unrefed_netmem()
915 void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page, in page_pool_put_unrefed_page() argument
918 page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size, in page_pool_put_unrefed_page()
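The return path above picks between the lockless per-CPU cache (allow_direct, only valid in the pool's NAPI context) and the ptr_ring, and frees the page outright when the ring is full. Drivers usually go through the put helpers; a hedged sketch with hypothetical napi_ctx/pkt_len values:

	if (napi_ctx)
		/* sync the full pool->p.max_len area, try the lockless cache */
		page_pool_put_full_page(pool, page, true);
	else
		/* only pkt_len bytes were written by the device: request a
		 * partial DMA sync and recycle via the ptr_ring
		 */
		page_pool_put_page(pool, page, pkt_len, false);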
923 static void page_pool_recycle_ring_bulk(struct page_pool *pool, in page_pool_recycle_ring_bulk() argument
931 in_softirq = page_pool_producer_lock(pool); in page_pool_recycle_ring_bulk()
934 if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) { in page_pool_recycle_ring_bulk()
936 recycle_stat_inc(pool, ring_full); in page_pool_recycle_ring_bulk()
941 page_pool_producer_unlock(pool, in_softirq); in page_pool_recycle_ring_bulk()
942 recycle_stat_add(pool, ring, i); in page_pool_recycle_ring_bulk()
953 page_pool_return_netmem(pool, bulk[i]); in page_pool_recycle_ring_bulk()
984 struct page_pool *pool = NULL; in page_pool_put_netmem_bulk() local
995 if (unlikely(!pool)) { in page_pool_put_netmem_bulk()
996 pool = netmem_pp; in page_pool_put_netmem_bulk()
997 allow_direct = page_pool_napi_local(pool); in page_pool_put_netmem_bulk()
998 } else if (netmem_pp != pool) { in page_pool_put_netmem_bulk()
1007 netmem = __page_pool_put_page(pool, netmem, -1, in page_pool_put_netmem_bulk()
1015 page_pool_recycle_ring_bulk(pool, bulk, bulk_len); in page_pool_put_netmem_bulk()
1022 static netmem_ref page_pool_drain_frag(struct page_pool *pool, in page_pool_drain_frag() argument
1025 long drain_count = BIAS_MAX - pool->frag_users; in page_pool_drain_frag()
1032 page_pool_dma_sync_for_device(pool, netmem, -1); in page_pool_drain_frag()
1036 page_pool_return_netmem(pool, netmem); in page_pool_drain_frag()
1040 static void page_pool_free_frag(struct page_pool *pool) in page_pool_free_frag() argument
1042 long drain_count = BIAS_MAX - pool->frag_users; in page_pool_free_frag()
1043 netmem_ref netmem = pool->frag_page; in page_pool_free_frag()
1045 pool->frag_page = 0; in page_pool_free_frag()
1050 page_pool_return_netmem(pool, netmem); in page_pool_free_frag()
1053 netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool, in page_pool_alloc_frag_netmem() argument
1057 unsigned int max_size = PAGE_SIZE << pool->p.order; in page_pool_alloc_frag_netmem()
1058 netmem_ref netmem = pool->frag_page; in page_pool_alloc_frag_netmem()
1064 *offset = pool->frag_offset; in page_pool_alloc_frag_netmem()
1067 netmem = page_pool_drain_frag(pool, netmem); in page_pool_alloc_frag_netmem()
1069 recycle_stat_inc(pool, cached); in page_pool_alloc_frag_netmem()
1070 alloc_stat_inc(pool, fast); in page_pool_alloc_frag_netmem()
1076 netmem = page_pool_alloc_netmems(pool, gfp); in page_pool_alloc_frag_netmem()
1078 pool->frag_page = 0; in page_pool_alloc_frag_netmem()
1082 pool->frag_page = netmem; in page_pool_alloc_frag_netmem()
1085 pool->frag_users = 1; in page_pool_alloc_frag_netmem()
1087 pool->frag_offset = size; in page_pool_alloc_frag_netmem()
1092 pool->frag_users++; in page_pool_alloc_frag_netmem()
1093 pool->frag_offset = *offset + size; in page_pool_alloc_frag_netmem()
1098 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, in page_pool_alloc_frag() argument
1101 return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size, in page_pool_alloc_frag()
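The frag allocator carves several buffers out of a single pool page, advancing frag_offset and counting frag_users so the page is only drained once every fragment has been returned. A hedged sub-page allocation sketch (rx_buf_len is a hypothetical per-ring buffer size, at most PAGE_SIZE << order):

	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(pool, &offset, rx_buf_len, GFP_ATOMIC);
	if (!page)
		return -ENOMEM;

	/* the buffer starts at page_address(page) + offset, rx_buf_len bytes long */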
1106 static void page_pool_empty_ring(struct page_pool *pool) in page_pool_empty_ring() argument
1111 while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
1117 page_pool_return_netmem(pool, netmem); in page_pool_empty_ring()
1121 static void __page_pool_destroy(struct page_pool *pool) in __page_pool_destroy() argument
1123 if (pool->disconnect) in __page_pool_destroy()
1124 pool->disconnect(pool); in __page_pool_destroy()
1126 page_pool_unlist(pool); in __page_pool_destroy()
1127 page_pool_uninit(pool); in __page_pool_destroy()
1129 if (pool->mp_ops) { in __page_pool_destroy()
1130 pool->mp_ops->destroy(pool); in __page_pool_destroy()
1134 kfree(pool); in __page_pool_destroy()
1137 static void page_pool_empty_alloc_cache_once(struct page_pool *pool) in page_pool_empty_alloc_cache_once() argument
1141 if (pool->destroy_cnt) in page_pool_empty_alloc_cache_once()
1148 while (pool->alloc.count) { in page_pool_empty_alloc_cache_once()
1149 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
1150 page_pool_return_netmem(pool, netmem); in page_pool_empty_alloc_cache_once()
1154 static void page_pool_scrub(struct page_pool *pool) in page_pool_scrub() argument
1159 page_pool_empty_alloc_cache_once(pool); in page_pool_scrub()
1160 if (!pool->destroy_cnt++ && pool->dma_map) { in page_pool_scrub()
1161 if (pool->dma_sync) { in page_pool_scrub()
1163 pool->dma_sync = false; in page_pool_scrub()
1171 if (dma_dev_need_sync(pool->p.dev) && in page_pool_scrub()
1172 !xa_empty(&pool->dma_mapped)) in page_pool_scrub()
1176 xa_for_each(&pool->dma_mapped, id, ptr) in page_pool_scrub()
1177 __page_pool_release_netmem_dma(pool, page_to_netmem((struct page *)ptr)); in page_pool_scrub()
1183 page_pool_empty_ring(pool); in page_pool_scrub()
1186 static int page_pool_release(struct page_pool *pool) in page_pool_release() argument
1191 page_pool_scrub(pool); in page_pool_release()
1192 inflight = page_pool_inflight(pool, true); in page_pool_release()
1194 in_softirq = page_pool_producer_lock(pool); in page_pool_release()
1195 page_pool_producer_unlock(pool, in_softirq); in page_pool_release()
1197 __page_pool_destroy(pool); in page_pool_release()
1205 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); in page_pool_release_retry() local
1209 inflight = page_pool_release(pool); in page_pool_release_retry()
1220 netdev = READ_ONCE(pool->slow.netdev); in page_pool_release_retry()
1221 if (time_after_eq(jiffies, pool->defer_warn) && in page_pool_release_retry()
1223 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ; in page_pool_release_retry()
1225 pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n", in page_pool_release_retry()
1226 __func__, pool->user.id, inflight, sec); in page_pool_release_retry()
1227 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; in page_pool_release_retry()
1231 schedule_delayed_work(&pool->release_dw, DEFER_TIME); in page_pool_release_retry()
1234 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), in page_pool_use_xdp_mem() argument
1237 refcount_inc(&pool->user_cnt); in page_pool_use_xdp_mem()
1238 pool->disconnect = disconnect; in page_pool_use_xdp_mem()
1239 pool->xdp_mem_id = mem->id; in page_pool_use_xdp_mem()
1243 * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
1244 * @pool: page pool to modify
1245 * @napi: NAPI instance to associate the page pool with
1247 * Associate a page pool with a NAPI instance for lockless page recycling.
1248 * This is useful when a new page pool has to be added to a NAPI instance
1250 * path "hands over" the page pool to the NAPI instance. In most cases driver
1258 void page_pool_enable_direct_recycling(struct page_pool *pool, in page_pool_enable_direct_recycling() argument
1261 if (READ_ONCE(pool->p.napi) == napi) in page_pool_enable_direct_recycling()
1263 WARN_ON(!napi || pool->p.napi); in page_pool_enable_direct_recycling()
1266 WRITE_ONCE(pool->p.napi, napi); in page_pool_enable_direct_recycling()
1271 void page_pool_disable_direct_recycling(struct page_pool *pool) in page_pool_disable_direct_recycling() argument
1273 /* Disable direct recycling based on pool->cpuid. in page_pool_disable_direct_recycling()
1276 WRITE_ONCE(pool->cpuid, -1); in page_pool_disable_direct_recycling()
1278 if (!pool->p.napi) in page_pool_disable_direct_recycling()
1281 napi_assert_will_not_race(pool->p.napi); in page_pool_disable_direct_recycling()
1284 WRITE_ONCE(pool->p.napi, NULL); in page_pool_disable_direct_recycling()
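page_pool_enable_direct_recycling() lets a pool created without .napi be handed over to a NAPI instance later, while page_pool_disable_direct_recycling() severs that link before the NAPI instance disappears. A hedged queue-reconfiguration sketch with hypothetical old_pool/new_pool/rxq names:

	/* detach the outgoing pool, then hand the replacement to the queue's NAPI */
	page_pool_disable_direct_recycling(old_pool);
	page_pool_enable_direct_recycling(new_pool, &rxq->napi);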
1289 void page_pool_destroy(struct page_pool *pool) in page_pool_destroy() argument
1291 if (!pool) in page_pool_destroy()
1294 if (!page_pool_put(pool)) in page_pool_destroy()
1297 page_pool_disable_direct_recycling(pool); in page_pool_destroy()
1298 page_pool_free_frag(pool); in page_pool_destroy()
1300 if (!page_pool_release(pool)) in page_pool_destroy()
1303 page_pool_detached(pool); in page_pool_destroy()
1304 pool->defer_start = jiffies; in page_pool_destroy()
1305 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; in page_pool_destroy()
1307 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry); in page_pool_destroy()
1308 schedule_delayed_work(&pool->release_dw, DEFER_TIME); in page_pool_destroy()
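page_pool_destroy() drops the caller's reference, flushes the frag page and caches, and defers the final free to the delayed work above while pages remain in flight (warning after each DEFER_WARN_INTERVAL). A hedged teardown fragment:

	/* NULL-safe; must run after the device has stopped using the buffers */
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;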
1313 void page_pool_update_nid(struct page_pool *pool, int new_nid) in page_pool_update_nid() argument
1317 trace_page_pool_update_nid(pool, new_nid); in page_pool_update_nid()
1318 pool->p.nid = new_nid; in page_pool_update_nid()
1320 /* Flush pool alloc cache, as refill will check NUMA node */ in page_pool_update_nid()
1321 while (pool->alloc.count) { in page_pool_update_nid()
1322 netmem = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
1323 page_pool_return_netmem(pool, netmem); in page_pool_update_nid()
1333 /* Associate a niov with a page pool. Should follow with a matching
1336 void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov) in net_mp_niov_set_page_pool() argument
1340 page_pool_set_pp_info(pool, netmem); in net_mp_niov_set_page_pool()
1342 pool->pages_state_hold_cnt++; in net_mp_niov_set_page_pool()
1343 trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt); in net_mp_niov_set_page_pool()
1346 /* Disassociate a niov from a page pool. Should only be used in the