Lines Matching full:page
16 #include <linux/page-flags.h>
43 * DMA_BIDIRECTIONAL is for allowing the page to be used for DMA sending, in page_pool_init()
53 /* In order to request DMA-sync-for-device the page in page_pool_init()
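Both page_pool_init() comments above describe choices a driver makes once, at pool-creation time, through struct page_pool_params. A minimal sketch of such a setup, assuming a driver-supplied struct device; the helper name and the sizes are placeholders, not taken from this file:

#include <net/page_pool.h>
#include <linux/dma-direction.h>

/* Hypothetical helper: create an RX pool whose pages the pool itself
 * DMA-maps (PP_FLAG_DMA_MAP) and syncs for the device before reuse
 * (PP_FLAG_DMA_SYNC_DEV).
 */
static struct page_pool *example_create_rx_pool(struct device *dev, int nid)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,			/* one page per frame */
		.pool_size	= 256,			/* illustrative */
		.nid		= nid,
		.dev		= dev,
		/* DMA_BIDIRECTIONAL so the same page can also be DMA'ed
		 * out again, e.g. for XDP_TX.
		 */
		.dma_dir	= DMA_BIDIRECTIONAL,
		.max_len	= PAGE_SIZE,		/* max area to sync */
		.offset		= 0,			/* start of RX data */
	};

	return page_pool_create(&pp);	/* returns ERR_PTR() on failure */
}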
101 static void page_pool_return_page(struct page_pool *pool, struct page *page);
104 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache()
107 struct page *page; in page_pool_refill_alloc_cache() local
129 page = __ptr_ring_consume(r); in page_pool_refill_alloc_cache()
130 if (unlikely(!page)) in page_pool_refill_alloc_cache()
133 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
134 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
137 * (1) release one page to the page allocator and in page_pool_refill_alloc_cache()
139 * This limits stress on the page buddy allocator. in page_pool_refill_alloc_cache()
141 page_pool_return_page(pool, page); in page_pool_refill_alloc_cache()
142 page = NULL; in page_pool_refill_alloc_cache()
147 /* Return last page */ in page_pool_refill_alloc_cache()
149 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
152 return page; in page_pool_refill_alloc_cache()
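Read together, the scattered lines above describe the ptr_ring refill algorithm: consume pages from the ring into the lockless alloc cache, keep only pages on the preferred NUMA node, and give the first mismatching page back to the page allocator. A sketch of that loop; the refill bound PP_ALLOC_CACHE_REFILL and the pref_nid fallback to numa_mem_id() are assumptions about the surrounding code:

static struct page *refill_alloc_cache_sketch(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page = NULL;
	int pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id()
						      : pool->p.nid;

	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			/* Right node: keep it in the alloc cache */
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch: return one page to the buddy
			 * allocator and stop refilling for now.
			 */
			page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Hand the last cached page directly to the caller */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	return page;
}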
156 static struct page *__page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached()
158 struct page *page; in __page_pool_get_cached() local
163 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
165 page = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
168 return page; in __page_pool_get_cached()
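The cached fast path is two-tiered: pop from the per-pool alloc.cache array when it is non-empty, otherwise refill it from the ptr_ring. The caller must guarantee non-concurrent access, in practice the driver's softirq/NAPI context. A sketch under those assumptions:

static struct page *get_cached_sketch(struct page_pool *pool)
{
	/* Caller must guarantee safe, non-concurrent access (softirq) */
	if (likely(pool->alloc.count))
		return pool->alloc.cache[--pool->alloc.count];

	return page_pool_refill_alloc_cache(pool);
}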
172 struct page *page, in page_pool_dma_sync_for_device() argument
176 dma_sync_single_range_for_device(pool->p.dev, page->dma_addr, in page_pool_dma_sync_for_device()
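Only the dma_sync_single_range_for_device() call is visible above. A plausible shape of the whole helper, assuming dma_sync_size is clamped to the pool's configured max_len and the sync starts at the configured offset:

static void dma_sync_for_device_sketch(struct page_pool *pool,
				       struct page *page,
				       unsigned int dma_sync_size)
{
	/* Never sync more than the driver asked for at pool setup */
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}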
183 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, in __page_pool_alloc_pages_slow()
186 struct page *page; in __page_pool_alloc_pages_slow() local
198 * Current slow-path essentially falls back to single page in __page_pool_alloc_pages_slow()
200 * needs bulk allocation support from the page allocator code. in __page_pool_alloc_pages_slow()
205 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_pages_slow()
207 page = alloc_pages(gfp, pool->p.order); in __page_pool_alloc_pages_slow()
209 if (!page) in __page_pool_alloc_pages_slow()
215 /* Set up DMA mapping: use the 'struct page' area for storing the DMA addr in __page_pool_alloc_pages_slow()
217 * into page private data (i.e. a 32-bit CPU with 64-bit DMA caps) in __page_pool_alloc_pages_slow()
218 * This mapping is kept for the lifetime of the page, until it leaves the pool. in __page_pool_alloc_pages_slow()
220 dma = dma_map_page_attrs(pool->p.dev, page, 0, in __page_pool_alloc_pages_slow()
224 put_page(page); in __page_pool_alloc_pages_slow()
227 page->dma_addr = dma; in __page_pool_alloc_pages_slow()
230 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); in __page_pool_alloc_pages_slow()
236 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); in __page_pool_alloc_pages_slow()
238 /* A page that was just allocated should/must have refcnt 1. */ in __page_pool_alloc_pages_slow()
239 return page; in __page_pool_alloc_pages_slow()
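The mapping step in this slow path maps the whole (possibly high-order) page once and stores the handle in page->dma_addr for the page's lifetime in the pool. A sketch of that step; the DMA_ATTR_SKIP_CPU_SYNC attribute and the exact error handling are assumptions:

static bool map_new_page_sketch(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 PAGE_SIZE << pool->p.order,
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);		/* mapping failed: give the page back */
		return false;
	}

	page->dma_addr = dma;		/* kept until the page leaves the pool */
	return true;
}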
245 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages()
247 struct page *page; in page_pool_alloc_pages() local
249 /* Fast-path: Get a page from cache */ in page_pool_alloc_pages()
250 page = __page_pool_get_cached(pool); in page_pool_alloc_pages()
251 if (page) in page_pool_alloc_pages()
252 return page; in page_pool_alloc_pages()
255 page = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_pages()
256 return page; in page_pool_alloc_pages()
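From a driver's point of view, page_pool_alloc_pages() (usually via the page_pool_dev_alloc_pages() wrapper) is the RX-ring refill primitive. A hypothetical refill loop; the descriptor layout and helper names are illustrative only:

struct example_rx_desc {
	dma_addr_t	addr;
	struct page	*page;
};

struct example_rx_ring {
	struct example_rx_desc	*desc;
	u32			next;
	u32			size;
};

static int example_refill_rx_ring(struct page_pool *pool,
				  struct example_rx_ring *ring, int budget)
{
	int filled = 0;

	while (filled < budget) {
		struct page *page = page_pool_dev_alloc_pages(pool);

		if (unlikely(!page))
			break;	/* out of pages: retry on the next NAPI poll */

		/* With PP_FLAG_DMA_MAP the pool already mapped the page */
		ring->desc[ring->next].addr = page_pool_get_dma_addr(page);
		ring->desc[ring->next].page = page;
		ring->next = (ring->next + 1) % ring->size;
		filled++;
	}

	return filled;
}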
279 /* Disconnects a page (from a page_pool). API users may need
280 * to disconnect a page from the pool, to allow it to be used as
281 * a regular page (that will eventually be returned to the normal
282 * page-allocator via put_page).
284 void page_pool_release_page(struct page_pool *pool, struct page *page) in page_pool_release_page() argument
295 dma = page->dma_addr; in page_pool_release_page()
297 /* When a page is unmapped, it cannot be returned to our pool */ in page_pool_release_page()
301 page->dma_addr = 0; in page_pool_release_page()
303 /* This may be the last page returned, releasing the pool, so in page_pool_release_page()
307 trace_page_pool_state_release(pool, page, count); in page_pool_release_page()
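A typical reason to disconnect a page is that the frame is being handed up the stack inside an skb, which the stack will eventually free with put_page() rather than return to the pool. An illustrative RX completion path; the helper name, sizes and headroom handling are placeholders:

static struct sk_buff *example_build_rx_skb(struct page_pool *pool,
					    struct page *page,
					    unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb)) {
		/* Could not wrap the page: recycle it instead */
		page_pool_put_full_page(pool, page, false);
		return NULL;
	}

	skb_put(skb, len);

	/* The stack will put_page() this page when the skb is freed, so
	 * unmap it and drop it from the pool's accounting now.
	 */
	page_pool_release_page(pool, page);
	return skb;
}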
311 /* Return a page to the page allocator, cleaning up our state */
312 static void page_pool_return_page(struct page_pool *pool, struct page *page) in page_pool_return_page() argument
314 page_pool_release_page(pool, page); in page_pool_return_page()
316 put_page(page); in page_pool_return_page()
317 /* An optimization would be to call __free_pages(page, pool->p.order) in page_pool_return_page()
318 * knowing the page is not part of the page cache (thus avoiding a in page_pool_return_page()
323 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) in page_pool_recycle_in_ring() argument
328 ret = ptr_ring_produce(&pool->ring, page); in page_pool_recycle_in_ring()
330 ret = ptr_ring_produce_bh(&pool->ring, page); in page_pool_recycle_in_ring()
340 static bool page_pool_recycle_in_cache(struct page *page, in page_pool_recycle_in_cache() argument
346 /* Caller MUST have verified/know (page_ref_count(page) == 1) */ in page_pool_recycle_in_cache()
347 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
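Only the success branch of the lockless-cache recycle is visible above. A plausible full shape, assuming the cache-size bound PP_ALLOC_CACHE_SIZE; this path is only legal from the pool's own softirq/NAPI context, with the refcount already verified by the caller:

static bool recycle_in_cache_sketch(struct page *page, struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;	/* cache full: fall back to the ptr_ring */

	/* Caller has verified page_ref_count(page) == 1 */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}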
351 /* page is NOT reusable when:
354 static bool pool_page_reusable(struct page_pool *pool, struct page *page) in pool_page_reusable() argument
356 return !page_is_pfmemalloc(page); in pool_page_reusable()
359 /* If the page refcnt == 1, this will try to recycle the page.
362 * If the page refcnt != 1, then the page will be returned to memory
365 void page_pool_put_page(struct page_pool *pool, struct page *page, in page_pool_put_page() argument
369 * one-frame-per-page, but has fallbacks that act like the in page_pool_put_page()
370 * regular page allocator APIs. in page_pool_put_page()
372 * refcnt == 1 means the page_pool owns the page and can recycle it. in page_pool_put_page()
374 if (likely(page_ref_count(page) == 1 && in page_pool_put_page()
375 pool_page_reusable(pool, page))) { in page_pool_put_page()
379 page_pool_dma_sync_for_device(pool, page, in page_pool_put_page()
383 if (page_pool_recycle_in_cache(page, pool)) in page_pool_put_page()
386 if (!page_pool_recycle_in_ring(pool, page)) { in page_pool_put_page()
388 page_pool_return_page(pool, page); in page_pool_put_page()
394 * Many drivers split up the page into fragments, and some in page_pool_put_page()
406 page_pool_release_page(pool, page); in page_pool_put_page()
407 put_page(page); in page_pool_put_page()
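How a driver chooses between those return paths: inside its own RX NAPI poll it may pass allow_direct = true so the page lands in the lockless cache; from any other context it must pass false so the ptr_ring (or, failing that, the page allocator) is used. Illustrative wrappers:

/* Drop a frame inside the RX NAPI poll: direct, lockless recycle */
static void example_drop_in_napi(struct page_pool *pool, struct page *page)
{
	/* Shorthand for page_pool_put_full_page(pool, page, true) */
	page_pool_recycle_direct(pool, page);
}

/* Return a page from process or other non-NAPI context */
static void example_put_outside_napi(struct page_pool *pool, struct page *page)
{
	/* -1 (full max_len) DMA sync, no direct-cache recycling */
	page_pool_put_page(pool, page, -1, false);
}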
413 struct page *page; in page_pool_empty_ring() local
416 while ((page = ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
418 if (!(page_ref_count(page) == 1)) in page_pool_empty_ring()
420 __func__, page_ref_count(page)); in page_pool_empty_ring()
422 page_pool_return_page(pool, page); in page_pool_empty_ring()
441 struct page *page; in page_pool_empty_alloc_cache_once() local
451 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
452 page_pool_return_page(pool, page); in page_pool_empty_alloc_cache_once()
530 struct page *page; in page_pool_update_nid() local
537 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
538 page_pool_return_page(pool, page); in page_pool_update_nid()
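page_pool_update_nid() flushes the alloc cache and retargets future allocations at a new NUMA node. Drivers normally reach it through the page_pool_nid_changed() wrapper from their NAPI poll, so the pool follows the CPU that actually processes RX. An illustrative poll function; the queue structure and names are placeholders:

struct example_rx_queue {
	struct napi_struct	napi;
	struct page_pool	*page_pool;
};

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_rx_queue *rxq =
		container_of(napi, struct example_rx_queue, napi);

	/* Only calls page_pool_update_nid() if the node really changed */
	page_pool_nid_changed(rxq->page_pool, numa_mem_id());

	/* ... normal RX processing, allocating from rxq->page_pool ... */

	return 0;
}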