Lines matching full:pool in net/core/page_pool.c (the page_pool implementation)
24 static int page_pool_init(struct page_pool *pool, in page_pool_init() argument
29 memcpy(&pool->p, params, sizeof(pool->p)); in page_pool_init()
32 if (pool->p.flags & ~(PP_FLAG_ALL)) in page_pool_init()
35 if (pool->p.pool_size) in page_pool_init()
36 ring_qsize = pool->p.pool_size; in page_pool_init()
46 if (pool->p.flags & PP_FLAG_DMA_MAP) { in page_pool_init()
47 if ((pool->p.dma_dir != DMA_FROM_DEVICE) && in page_pool_init()
48 (pool->p.dma_dir != DMA_BIDIRECTIONAL)) in page_pool_init()
52 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) { in page_pool_init()
56 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) in page_pool_init()
59 if (!pool->p.max_len) in page_pool_init()
62 /* pool->p.offset has to be set according to the address in page_pool_init()
67 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) in page_pool_init()
70 atomic_set(&pool->pages_state_release_cnt, 0); in page_pool_init()
73 refcount_set(&pool->user_cnt, 1); in page_pool_init()
75 if (pool->p.flags & PP_FLAG_DMA_MAP) in page_pool_init()
76 get_device(pool->p.dev); in page_pool_init()
83 struct page_pool *pool; in page_pool_create() local
86 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid); in page_pool_create()
87 if (!pool) in page_pool_create()
90 err = page_pool_init(pool, params); in page_pool_create()
93 kfree(pool); in page_pool_create()
97 return pool; in page_pool_create()
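
The fragments above are the setup path: page_pool_init() validates the caller-supplied struct page_pool_params and page_pool_create() allocates and initializes the pool (in the full source an ERR_PTR is returned on failure). Below is a minimal sketch of how a driver RX ring might fill in the parameters and create a pool. struct my_rx_ring, the field names, the headroom choice and the helper names are illustrative assumptions, not taken from the listing; the later sketches in this listing reuse these definitions, and real drivers also reserve skb_shared_info tailroom when sizing max_len.

#include <linux/bpf.h>          /* XDP_PACKET_HEADROOM */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <net/page_pool.h>
#include <net/xdp.h>

/* Hypothetical per-ring context; only the fields used in these sketches. */
struct my_rx_ring {
        struct page_pool *page_pool;
        struct xdp_rxq_info xdp_rxq;    /* used by the XDP sketch further down */
        struct device *dev;             /* the NIC's DMA device, e.g. &pdev->dev */
        int numa_node;
};

static int my_rx_create_pool(struct my_rx_ring *ring, unsigned int ring_size)
{
        struct page_pool_params pp = {
                /* PP_FLAG_DMA_MAP: the pool maps pages and keeps the mapping
                 * for as long as the page stays in the pool.
                 * PP_FLAG_DMA_SYNC_DEV: the pool syncs the RX area back to
                 * the device on recycle; page_pool_init() above requires
                 * DMA_MAP and a non-zero max_len for this flag.
                 */
                .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order          = 0,                    /* one page per frame */
                .pool_size      = ring_size,            /* sizes pool->ring (ptr_ring) */
                .nid            = ring->numa_node,      /* or NUMA_NO_NODE */
                .dev            = ring->dev,
                .dma_dir        = DMA_FROM_DEVICE,      /* must be FROM_DEVICE or BIDIRECTIONAL */
                .offset         = XDP_PACKET_HEADROOM,  /* headroom before the HW-written area */
                .max_len        = PAGE_SIZE - XDP_PACKET_HEADROOM, /* largest area to sync */
        };

        ring->page_pool = page_pool_create(&pp);
        if (IS_ERR(ring->page_pool)) {
                int err = PTR_ERR(ring->page_pool);

                ring->page_pool = NULL;
                return err;
        }
        return 0;
}
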
101 static void page_pool_return_page(struct page_pool *pool, struct page *page);
104 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache() argument
106 struct ptr_ring *r = &pool->ring; in page_pool_refill_alloc_cache()
118 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid; in page_pool_refill_alloc_cache()
120 /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */ in page_pool_refill_alloc_cache()
134 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
141 page_pool_return_page(pool, page); in page_pool_refill_alloc_cache()
145 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); in page_pool_refill_alloc_cache()
148 if (likely(pool->alloc.count > 0)) in page_pool_refill_alloc_cache()
149 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
156 static struct page *__page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached() argument
161 if (likely(pool->alloc.count)) { in __page_pool_get_cached()
163 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
165 page = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
171 static void page_pool_dma_sync_for_device(struct page_pool *pool, in page_pool_dma_sync_for_device() argument
175 dma_sync_size = min(dma_sync_size, pool->p.max_len); in page_pool_dma_sync_for_device()
176 dma_sync_single_range_for_device(pool->p.dev, page->dma_addr, in page_pool_dma_sync_for_device()
177 pool->p.offset, dma_sync_size, in page_pool_dma_sync_for_device()
178 pool->p.dma_dir); in page_pool_dma_sync_for_device()
183 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, in __page_pool_alloc_pages_slow() argument
193 if (pool->p.order) in __page_pool_alloc_pages_slow()
205 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_pages_slow()
207 page = alloc_pages(gfp, pool->p.order); in __page_pool_alloc_pages_slow()
212 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) in __page_pool_alloc_pages_slow()
218 * This mapping is kept for the lifetime of the page, until it leaves the pool. in __page_pool_alloc_pages_slow()
220 dma = dma_map_page_attrs(pool->p.dev, page, 0, in __page_pool_alloc_pages_slow()
221 (PAGE_SIZE << pool->p.order), in __page_pool_alloc_pages_slow()
222 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC); in __page_pool_alloc_pages_slow()
223 if (dma_mapping_error(pool->p.dev, dma)) { in __page_pool_alloc_pages_slow()
229 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) in __page_pool_alloc_pages_slow()
230 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); in __page_pool_alloc_pages_slow()
234 pool->pages_state_hold_cnt++; in __page_pool_alloc_pages_slow()
236 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); in __page_pool_alloc_pages_slow()
245 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages() argument
250 page = __page_pool_get_cached(pool); in page_pool_alloc_pages()
255 page = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_pages()
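
page_pool_alloc_pages() above is the driver-facing allocation entry: it first tries the lockless alloc.cache, refills that from the ptr_ring, and only then falls back to the page allocator in __page_pool_alloc_pages_slow(). A hedged refill sketch building on the hypothetical my_rx_ring above; struct my_rx_desc and the descriptor layout are made up for illustration, and page_pool_get_dma_addr() is this era's accessor for the page->dma_addr field the fragments above use.

/* Hypothetical HW RX descriptor: just a little-endian buffer address. */
struct my_rx_desc {
        __le64 addr;
};

static int my_rx_refill_one(struct my_rx_ring *ring, struct my_rx_desc *desc)
{
        struct page *page;

        page = page_pool_alloc_pages(ring->page_pool, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!page))
                return -ENOMEM;

        /* With PP_FLAG_DMA_MAP the pool already mapped the page in
         * __page_pool_alloc_pages_slow(), so only the address needs to be
         * programmed; the device writes after the configured headroom.
         */
        desc->addr = cpu_to_le64(page_pool_get_dma_addr(page) +
                                 XDP_PACKET_HEADROOM);
        return 0;
}
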
265 static s32 page_pool_inflight(struct page_pool *pool) in page_pool_inflight() argument
267 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt); in page_pool_inflight()
268 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt); in page_pool_inflight()
273 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); in page_pool_inflight()
284 void page_pool_release_page(struct page_pool *pool, struct page *page) in page_pool_release_page() argument
289 if (!(pool->p.flags & PP_FLAG_DMA_MAP)) in page_pool_release_page()
297 /* When page is unmapped, it cannot be returned to our pool */ in page_pool_release_page()
298 dma_unmap_page_attrs(pool->p.dev, dma, in page_pool_release_page()
299 PAGE_SIZE << pool->p.order, pool->p.dma_dir, in page_pool_release_page()
303 /* This may be the last page returned, releasing the pool, so in page_pool_release_page()
304 * it is not safe to reference pool afterwards. in page_pool_release_page()
306 count = atomic_inc_return(&pool->pages_state_release_cnt); in page_pool_release_page()
307 trace_page_pool_state_release(pool, page, count); in page_pool_release_page()
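
Besides being the internal unmap-and-account step, page_pool_release_page() is what a driver of this generation calls when a page leaves the pool for good, typically once the frame has been wrapped in an skb and handed to the stack (the skb free path does not recycle page_pool pages in this era). A hedged sketch; the dma_sync_single_for_cpu() before reading the frame and the skb_shared_info/truesize accounting are elided.

static void my_rx_deliver(struct my_rx_ring *ring, struct napi_struct *napi,
                          struct page *page, unsigned int len)
{
        struct sk_buff *skb;

        skb = build_skb(page_address(page), PAGE_SIZE);
        if (unlikely(!skb)) {
                /* Could not wrap it: hand the page straight back to the pool
                 * (allow_direct = true because this runs in NAPI context).
                 */
                page_pool_put_full_page(ring->page_pool, page, true);
                return;
        }
        skb_reserve(skb, XDP_PACKET_HEADROOM);  /* the pp.offset headroom */
        skb_put(skb, len);

        /* DMA-unmap and drop the pool's accounting; from here on the page is
         * a plain page owned by the skb and freed by the normal put_page().
         */
        page_pool_release_page(ring->page_pool, page);

        napi_gro_receive(napi, skb);
}
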
312 static void page_pool_return_page(struct page_pool *pool, struct page *page) in page_pool_return_page() argument
314 page_pool_release_page(pool, page); in page_pool_return_page()
317 /* An optimization would be to call __free_pages(page, pool->p.order) in page_pool_return_page()
323 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) in page_pool_recycle_in_ring() argument
328 ret = ptr_ring_produce(&pool->ring, page); in page_pool_recycle_in_ring()
330 ret = ptr_ring_produce_bh(&pool->ring, page); in page_pool_recycle_in_ring()
341 struct page_pool *pool) in page_pool_recycle_in_cache() argument
343 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) in page_pool_recycle_in_cache()
347 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
354 static bool pool_page_reusable(struct page_pool *pool, struct page *page) in pool_page_reusable() argument
361 * the configured size min(dma_sync_size, pool->p.max_len).
365 void page_pool_put_page(struct page_pool *pool, struct page *page, in page_pool_put_page() argument
375 pool_page_reusable(pool, page))) { in page_pool_put_page()
378 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) in page_pool_put_page()
379 page_pool_dma_sync_for_device(pool, page, in page_pool_put_page()
383 if (page_pool_recycle_in_cache(page, pool)) in page_pool_put_page()
386 if (!page_pool_recycle_in_ring(pool, page)) { in page_pool_put_page()
388 page_pool_return_page(pool, page); in page_pool_put_page()
406 page_pool_release_page(pool, page); in page_pool_put_page()
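
page_pool_put_page() above is the recycle entry point a driver calls when it is done with a page: a reusable page goes back into the alloc.cache (only if allow_direct and running in the pool's NAPI/softirq context) or into the ptr_ring, optionally synced for the device first; anything not recyclable falls through to page_pool_release_page(). A hedged drop-path sketch, assuming this era's full signature page_pool_put_page(pool, page, dma_sync_size, allow_direct):

/* XDP_DROP / error path: return the page straight to the pool.
 *
 * dma_sync_size bounds the sync-for-device to the bytes the HW may have
 * written (capped at pool->p.max_len in page_pool_dma_sync_for_device()).
 * allow_direct may only be true from the softirq context that owns the
 * pool, because the alloc.cache is lockless.
 */
static void my_rx_drop(struct my_rx_ring *ring, struct page *page,
                       unsigned int frame_len, bool in_napi)
{
        page_pool_put_page(ring->page_pool, page, frame_len, in_napi);
}

The convenience wrappers in the same header, page_pool_put_full_page() (sync up to max_len) and page_pool_recycle_direct() (NAPI-context shorthand), end up in this same function.
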
411 static void page_pool_empty_ring(struct page_pool *pool) in page_pool_empty_ring() argument
416 while ((page = ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
422 page_pool_return_page(pool, page); in page_pool_empty_ring()
426 static void page_pool_free(struct page_pool *pool) in page_pool_free() argument
428 if (pool->disconnect) in page_pool_free()
429 pool->disconnect(pool); in page_pool_free()
431 ptr_ring_cleanup(&pool->ring, NULL); in page_pool_free()
433 if (pool->p.flags & PP_FLAG_DMA_MAP) in page_pool_free()
434 put_device(pool->p.dev); in page_pool_free()
436 kfree(pool); in page_pool_free()
439 static void page_pool_empty_alloc_cache_once(struct page_pool *pool) in page_pool_empty_alloc_cache_once() argument
443 if (pool->destroy_cnt) in page_pool_empty_alloc_cache_once()
450 while (pool->alloc.count) { in page_pool_empty_alloc_cache_once()
451 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
452 page_pool_return_page(pool, page); in page_pool_empty_alloc_cache_once()
456 static void page_pool_scrub(struct page_pool *pool) in page_pool_scrub() argument
458 page_pool_empty_alloc_cache_once(pool); in page_pool_scrub()
459 pool->destroy_cnt++; in page_pool_scrub()
464 page_pool_empty_ring(pool); in page_pool_scrub()
467 static int page_pool_release(struct page_pool *pool) in page_pool_release() argument
471 page_pool_scrub(pool); in page_pool_release()
472 inflight = page_pool_inflight(pool); in page_pool_release()
474 page_pool_free(pool); in page_pool_release()
482 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw); in page_pool_release_retry() local
485 inflight = page_pool_release(pool); in page_pool_release_retry()
490 if (time_after_eq(jiffies, pool->defer_warn)) { in page_pool_release_retry()
491 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ; in page_pool_release_retry()
493 pr_warn("%s() stalled pool shutdown %d inflight %d sec\n", in page_pool_release_retry()
495 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; in page_pool_release_retry()
499 schedule_delayed_work(&pool->release_dw, DEFER_TIME); in page_pool_release_retry()
502 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *)) in page_pool_use_xdp_mem() argument
504 refcount_inc(&pool->user_cnt); in page_pool_use_xdp_mem()
505 pool->disconnect = disconnect; in page_pool_use_xdp_mem()
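
Drivers normally do not call page_pool_use_xdp_mem() directly: it is invoked by the XDP core when the pool is registered as an RX queue's memory model, and that is what takes the extra user_cnt reference and installs pool->disconnect. A hedged sketch of that registration, assuming the three-argument xdp_rxq_info_reg() of this kernel era:

static int my_rx_reg_xdp(struct my_rx_ring *ring, struct net_device *netdev,
                         u32 queue_index)
{
        int err;

        err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, queue_index);
        if (err)
                return err;

        /* Ties the pool to this RX queue; the XDP core ends up in
         * page_pool_use_xdp_mem() above.
         */
        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                         ring->page_pool);
        if (err)
                xdp_rxq_info_unreg(&ring->xdp_rxq);

        return err;
}
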
508 void page_pool_destroy(struct page_pool *pool) in page_pool_destroy() argument
510 if (!pool) in page_pool_destroy()
513 if (!page_pool_put(pool)) in page_pool_destroy()
516 if (!page_pool_release(pool)) in page_pool_destroy()
519 pool->defer_start = jiffies; in page_pool_destroy()
520 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL; in page_pool_destroy()
522 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry); in page_pool_destroy()
523 schedule_delayed_work(&pool->release_dw, DEFER_TIME); in page_pool_destroy()
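
Teardown as a driver of this era might do it: the driver first returns or releases every page it still holds in its RX ring (not shown), unregisters the xdp_rxq_info so the XDP core drops its reference, then calls page_pool_destroy() to drop its own. The pool is freed once the last in-flight page comes back; until then page_pool_release_retry() above keeps rescheduling and warns every DEFER_WARN_INTERVAL.

static void my_rx_ring_teardown(struct my_rx_ring *ring)
{
        if (!ring->page_pool)
                return;

        /* Releases the XDP memory model registration, dropping the
         * reference taken via page_pool_use_xdp_mem().
         */
        xdp_rxq_info_unreg(&ring->xdp_rxq);

        /* Drops the driver's user_cnt reference. With pages still in
         * flight, freeing is deferred through release_dw above.
         */
        page_pool_destroy(ring->page_pool);
        ring->page_pool = NULL;
}
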
528 void page_pool_update_nid(struct page_pool *pool, int new_nid) in page_pool_update_nid() argument
532 trace_page_pool_update_nid(pool, new_nid); in page_pool_update_nid()
533 pool->p.nid = new_nid; in page_pool_update_nid()
535 /* Flush pool alloc cache, as refill will check NUMA node */ in page_pool_update_nid()
536 while (pool->alloc.count) { in page_pool_update_nid()
537 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
538 page_pool_return_page(pool, page); in page_pool_update_nid()
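
page_pool_update_nid() lets a driver keep page allocations local after its RX interrupt migrates to a CPU on another NUMA node; the caller must run in a context serialized against the pool's users, since the alloc.cache flush above is lockless. A hedged sketch of such a notification path:

/* Called from the driver's IRQ-affinity notifier or NAPI poll, i.e. a
 * context that cannot race with other users of this pool. The
 * page_pool_nid_changed() helper in the header wraps this call with a
 * "did the node actually change" check.
 */
static void my_rx_node_changed(struct my_rx_ring *ring, int new_cpu)
{
        page_pool_update_nid(ring->page_pool, cpu_to_node(new_cpu));
}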