Lines Matching full:page
18 #include <linux/page-flags.h>
64 * page_pool_get_stats() - fetch page pool stats
65 * @pool: pool from which page was allocated
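A hedged sketch of how a driver's ethtool path might consume this helper (assumes CONFIG_PAGE_POOL_STATS; the mydrv_* name is hypothetical and the stats field names follow recent kernels):

	#include <net/page_pool/helpers.h>	/* <net/page_pool.h> on older trees */

	/* Hypothetical ethtool hook: copy a few pool counters into the stats buffer. */
	static void mydrv_get_pool_stats(struct page_pool *pool, u64 *data)
	{
		struct page_pool_stats stats = { };

		if (!page_pool_get_stats(pool, &stats))
			return;		/* false when no stats are available */

		data[0] = stats.alloc_stats.fast;	/* allocations served from the cache */
		data[1] = stats.alloc_stats.slow;	/* allocations that hit the page allocator */
		data[2] = stats.recycle_stats.cached;	/* recycled into the lockless cache */
		data[3] = stats.recycle_stats.ring;	/* recycled via the ptr_ring */
	}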
193 * DMA_BIDIRECTIONAL is for allowing the page to be used for DMA sending, in page_pool_init()
203 /* In order to request DMA-sync-for-device the page in page_pool_init()
256 * page_pool_create() - create a page pool.
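For context, a minimal sketch of the driver setup this creation path serves; the values are illustrative and the function name is hypothetical:

	#include <linux/dma-mapping.h>
	#include <net/page_pool/helpers.h>

	static struct page_pool *mydrv_create_rx_pool(struct device *dev, int numa_node)
	{
		struct page_pool_params pp_params = {
			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order		= 0,			/* one page per receive buffer */
			.pool_size	= 1024,			/* roughly the RX ring size */
			.nid		= numa_node,
			.dev		= dev,			/* device that does the DMA mapping */
			.dma_dir	= DMA_FROM_DEVICE,	/* RX only; DMA_BIDIRECTIONAL for XDP_TX */
			.max_len	= PAGE_SIZE,		/* length to dma-sync for the device */
			.offset		= 0,
		};

		return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
	}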
287 static void page_pool_return_page(struct page_pool *pool, struct page *page);
290 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) in page_pool_refill_alloc_cache()
293 struct page *page; in page_pool_refill_alloc_cache() local
314 page = __ptr_ring_consume(r); in page_pool_refill_alloc_cache()
315 if (unlikely(!page)) in page_pool_refill_alloc_cache()
318 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
319 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_refill_alloc_cache()
322 * (1) release 1 page to page-allocator and in page_pool_refill_alloc_cache()
324 * This limits stress on the page buddy allocator. in page_pool_refill_alloc_cache()
326 page_pool_return_page(pool, page); in page_pool_refill_alloc_cache()
328 page = NULL; in page_pool_refill_alloc_cache()
333 /* Return last page */ in page_pool_refill_alloc_cache()
335 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_refill_alloc_cache()
339 return page; in page_pool_refill_alloc_cache()
343 static struct page *__page_pool_get_cached(struct page_pool *pool) in __page_pool_get_cached()
345 struct page *page; in __page_pool_get_cached() local
350 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_get_cached()
353 page = page_pool_refill_alloc_cache(pool); in __page_pool_get_cached()
356 return page; in __page_pool_get_cached()
360 struct page *page, in page_pool_dma_sync_for_device() argument
363 dma_addr_t dma_addr = page_pool_get_dma_addr(page); in page_pool_dma_sync_for_device()
371 static bool page_pool_dma_map(struct page_pool *pool, struct page *page) in page_pool_dma_map() argument
375 /* Setup DMA mapping: use 'struct page' area for storing DMA-addr in page_pool_dma_map()
377 * into page private data (i.e. 32-bit CPU with 64-bit DMA caps) in page_pool_dma_map()
378 * This mapping is kept for the lifetime of the page, until it leaves the pool. in page_pool_dma_map()
380 dma = dma_map_page_attrs(pool->p.dev, page, 0, in page_pool_dma_map()
387 if (page_pool_set_dma_addr(page, dma)) in page_pool_dma_map()
391 page_pool_dma_sync_for_device(pool, page, pool->p.max_len); in page_pool_dma_map()
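Because the mapping set up here is stored in struct page for the page's lifetime in the pool, callers read it back instead of re-mapping per buffer. A hedged refill sketch (using DMA_MAPPING_ERROR as a sentinel is a choice made here, not part of the page_pool API):

	#include <linux/dma-mapping.h>
	#include <net/page_pool/helpers.h>

	/* Hypothetical refill helper: hand out a pool page plus the bus address
	 * the NIC should DMA into.
	 */
	static dma_addr_t mydrv_rx_buffer_dma(struct page_pool *pool, struct page **pagep)
	{
		struct page *page = page_pool_dev_alloc_pages(pool);	/* GFP_ATOMIC wrapper */

		if (unlikely(!page))
			return DMA_MAPPING_ERROR;

		*pagep = page;
		return page_pool_get_dma_addr(page);	/* address stored by page_pool_dma_map() */
	}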
404 struct page *page) in page_pool_set_pp_info() argument
406 page->pp = pool; in page_pool_set_pp_info()
407 page->pp_magic |= PP_SIGNATURE; in page_pool_set_pp_info()
410 * page_pool_set_pp_info() is only called once for every page when it in page_pool_set_pp_info()
411 * is allocated from the page allocator and page_pool_fragment_page() in page_pool_set_pp_info()
412 * is dirtying the same cache line as the page->pp_magic above, so in page_pool_set_pp_info()
415 page_pool_fragment_page(page, 1); in page_pool_set_pp_info()
417 pool->slow.init_callback(page, pool->slow.init_arg); in page_pool_set_pp_info()
420 static void page_pool_clear_pp_info(struct page *page) in page_pool_clear_pp_info() argument
422 page->pp_magic = 0; in page_pool_clear_pp_info()
423 page->pp = NULL; in page_pool_clear_pp_info()
426 static struct page *__page_pool_alloc_page_order(struct page_pool *pool, in __page_pool_alloc_page_order()
429 struct page *page; in __page_pool_alloc_page_order() local
432 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_page_order()
433 if (unlikely(!page)) in __page_pool_alloc_page_order()
437 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_page_order()
438 put_page(page); in __page_pool_alloc_page_order()
443 page_pool_set_pp_info(pool, page); in __page_pool_alloc_page_order()
447 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); in __page_pool_alloc_page_order()
448 return page; in __page_pool_alloc_page_order()
453 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, in __page_pool_alloc_pages_slow()
459 struct page *page; in __page_pool_alloc_pages_slow() local
479 * page elements have not (yet) been DMA mapped. in __page_pool_alloc_pages_slow()
482 page = pool->alloc.cache[i]; in __page_pool_alloc_pages_slow()
484 unlikely(!page_pool_dma_map(pool, page))) { in __page_pool_alloc_pages_slow()
485 put_page(page); in __page_pool_alloc_pages_slow()
489 page_pool_set_pp_info(pool, page); in __page_pool_alloc_pages_slow()
490 pool->alloc.cache[pool->alloc.count++] = page; in __page_pool_alloc_pages_slow()
493 trace_page_pool_state_hold(pool, page, in __page_pool_alloc_pages_slow()
497 /* Return last page */ in __page_pool_alloc_pages_slow()
499 page = pool->alloc.cache[--pool->alloc.count]; in __page_pool_alloc_pages_slow()
502 page = NULL; in __page_pool_alloc_pages_slow()
505 /* A page that was just alloc'ed should/must have refcnt 1. */ in __page_pool_alloc_pages_slow()
506 return page; in __page_pool_alloc_pages_slow()
512 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) in page_pool_alloc_pages()
514 struct page *page; in page_pool_alloc_pages() local
516 /* Fast-path: Get a page from cache */ in page_pool_alloc_pages()
517 page = __page_pool_get_cached(pool); in page_pool_alloc_pages()
518 if (page) in page_pool_alloc_pages()
519 return page; in page_pool_alloc_pages()
522 page = __page_pool_alloc_pages_slow(pool, gfp); in page_pool_alloc_pages()
523 return page; in page_pool_alloc_pages()
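A hedged sketch of how the result of this fast/slow split is typically consumed on an RX path, with the skb layer recycling the page back into the pool; headroom handling is omitted and the function name is hypothetical:

	#include <linux/skbuff.h>
	#include <net/page_pool/helpers.h>

	static struct sk_buff *mydrv_build_rx_skb(struct page_pool *pool,
						  struct page *page, unsigned int len)
	{
		struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

		if (unlikely(!skb)) {
			/* return the page to the pool; true = called from NAPI context */
			page_pool_put_full_page(pool, page, true);
			return NULL;
		}

		skb_put(skb, len);
		skb_mark_for_recycle(skb);	/* lets kfree_skb() route the page back to the pool */
		return skb;
	}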
552 void __page_pool_release_page_dma(struct page_pool *pool, struct page *page) in __page_pool_release_page_dma() argument
562 dma = page_pool_get_dma_addr(page); in __page_pool_release_page_dma()
564 /* When page is unmapped, it cannot be returned to our pool */ in __page_pool_release_page_dma()
568 page_pool_set_dma_addr(page, 0); in __page_pool_release_page_dma()
571 /* Disconnects a page (from a page_pool). API users can have a need
572 * to disconnect a page from a page_pool, to allow it to be used as
573 * a regular page (that will eventually be returned to the normal
574 * page-allocator via put_page).
576 void page_pool_return_page(struct page_pool *pool, struct page *page) in page_pool_return_page() argument
580 __page_pool_release_page_dma(pool, page); in page_pool_return_page()
582 page_pool_clear_pp_info(page); in page_pool_return_page()
584 /* This may be the last page returned, releasing the pool, so in page_pool_return_page()
588 trace_page_pool_state_release(pool, page, count); in page_pool_return_page()
590 put_page(page); in page_pool_return_page()
591 /* An optimization would be to call __free_pages(page, pool->p.order) in page_pool_return_page()
592 * knowing page is not part of page-cache (thus avoiding a in page_pool_return_page()
597 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) in page_pool_recycle_in_ring() argument
602 ret = ptr_ring_produce(&pool->ring, page); in page_pool_recycle_in_ring()
604 ret = ptr_ring_produce_bh(&pool->ring, page); in page_pool_recycle_in_ring()
619 static bool page_pool_recycle_in_cache(struct page *page, in page_pool_recycle_in_cache() argument
627 /* Caller MUST have verified/know (page_ref_count(page) == 1) */ in page_pool_recycle_in_cache()
628 pool->alloc.cache[pool->alloc.count++] = page; in page_pool_recycle_in_cache()
633 /* If the page refcnt == 1, this will try to recycle the page.
636 * If the page refcnt != 1, then the page will be returned to memory
639 static __always_inline struct page *
640 __page_pool_put_page(struct page_pool *pool, struct page *page, in __page_pool_put_page() argument
646 * one-frame-per-page, but has fallbacks that act like the in __page_pool_put_page()
647 * regular page allocator APIs. in __page_pool_put_page()
649 * refcnt == 1 means page_pool owns page, and can recycle it. in __page_pool_put_page()
651 * page is NOT reusable when allocated while the system is under in __page_pool_put_page()
654 if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) { in __page_pool_put_page()
658 page_pool_dma_sync_for_device(pool, page, in __page_pool_put_page()
662 page_pool_recycle_in_cache(page, pool)) in __page_pool_put_page()
665 /* Page found as candidate for recycling */ in __page_pool_put_page()
666 return page; in __page_pool_put_page()
670 * Many drivers split up the page into fragments, and some in __page_pool_put_page()
682 page_pool_return_page(pool, page); in __page_pool_put_page()
687 void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page, in page_pool_put_unrefed_page() argument
690 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); in page_pool_put_unrefed_page()
691 if (page && !page_pool_recycle_in_ring(pool, page)) { in page_pool_put_unrefed_page()
694 page_pool_return_page(pool, page); in page_pool_put_unrefed_page()
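A sketch of the caller-side release this helper backs, e.g. for an XDP_DROP verdict; both wrappers used are standard public helpers, while the surrounding names are hypothetical:

	/* Hypothetical drop handling: pick the cheapest safe return path. */
	static void mydrv_xdp_drop(struct page_pool *pool, struct page *page,
				   unsigned int data_len, bool in_napi)
	{
		if (in_napi)
			page_pool_recycle_direct(pool, page);		 /* lockless per-CPU cache */
		else
			page_pool_put_page(pool, page, data_len, false); /* sync only data_len for device */
	}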
702 * @data: array holding page pointers
707 * will release leftover pages to the page allocator.
721 struct page *page = virt_to_head_page(data[i]); in page_pool_put_page_bulk() local
723 /* It is not the last user for the page frag case */ in page_pool_put_page_bulk()
724 if (!page_pool_is_last_ref(page)) in page_pool_put_page_bulk()
727 page = __page_pool_put_page(pool, page, -1, false); in page_pool_put_page_bulk()
729 if (page) in page_pool_put_page_bulk()
730 data[bulk_len++] = page; in page_pool_put_page_bulk()
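A hedged usage sketch for the bulk API as of this kernel version: the array carries buffer virtual addresses (hence the virt_to_head_page() above), and every entry must belong to the same pool; names and the batch size are illustrative:

	/* Hypothetical batched release of RX buffers that all came from @pool. */
	static void mydrv_free_rx_batch(struct page_pool *pool, struct page **pages, int count)
	{
		void *data[16];
		int i, n = 0;

		for (i = 0; i < count && n < ARRAY_SIZE(data); i++)
			data[n++] = page_address(pages[i]);	/* virtual addresses, not struct page */

		if (n)
			page_pool_put_page_bulk(pool, data, n);
	}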
760 static struct page *page_pool_drain_frag(struct page_pool *pool, in page_pool_drain_frag() argument
761 struct page *page) in page_pool_drain_frag()
765 /* Some user is still using the page frag */ in page_pool_drain_frag()
766 if (likely(page_pool_unref_page(page, drain_count))) in page_pool_drain_frag()
769 if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) { in page_pool_drain_frag()
771 page_pool_dma_sync_for_device(pool, page, -1); in page_pool_drain_frag()
773 return page; in page_pool_drain_frag()
776 page_pool_return_page(pool, page); in page_pool_drain_frag()
783 struct page *page = pool->frag_page; in page_pool_free_frag() local
787 if (!page || page_pool_unref_page(page, drain_count)) in page_pool_free_frag()
790 page_pool_return_page(pool, page); in page_pool_free_frag()
793 struct page *page_pool_alloc_frag(struct page_pool *pool, in page_pool_alloc_frag()
798 struct page *page = pool->frag_page; in page_pool_alloc_frag() local
806 if (page && *offset + size > max_size) { in page_pool_alloc_frag()
807 page = page_pool_drain_frag(pool, page); in page_pool_alloc_frag()
808 if (page) { in page_pool_alloc_frag()
814 if (!page) { in page_pool_alloc_frag()
815 page = page_pool_alloc_pages(pool, gfp); in page_pool_alloc_frag()
816 if (unlikely(!page)) { in page_pool_alloc_frag()
821 pool->frag_page = page; in page_pool_alloc_frag()
827 page_pool_fragment_page(page, BIAS_MAX); in page_pool_alloc_frag()
828 return page; in page_pool_alloc_frag()
834 return page; in page_pool_alloc_frag()
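A minimal caller sketch for the frag API, where several sub-page buffers share one pool page (2048 is an illustrative slice size; older kernels additionally required a page-frag flag at pool creation):

	/* Hypothetical sub-page RX buffer allocation. */
	static void *mydrv_alloc_rx_frag(struct page_pool *pool, dma_addr_t *dma)
	{
		unsigned int offset;
		struct page *page;

		page = page_pool_dev_alloc_frag(pool, &offset, 2048);	/* 2K slice of a pool page */
		if (unlikely(!page))
			return NULL;

		*dma = page_pool_get_dma_addr(page) + offset;
		return page_address(page) + offset;
	}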
840 struct page *page; in page_pool_empty_ring() local
843 while ((page = ptr_ring_consume_bh(&pool->ring))) { in page_pool_empty_ring()
845 if (!(page_ref_count(page) == 1)) in page_pool_empty_ring()
847 __func__, page_ref_count(page)); in page_pool_empty_ring()
849 page_pool_return_page(pool, page); in page_pool_empty_ring()
865 struct page *page; in page_pool_empty_alloc_cache_once() local
875 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_empty_alloc_cache_once()
876 page_pool_return_page(pool, page); in page_pool_empty_alloc_cache_once()
914 /* Periodic warning for page pools the user can't see */ in page_pool_release_retry()
978 struct page *page; in page_pool_update_nid() local
985 page = pool->alloc.cache[--pool->alloc.count]; in page_pool_update_nid()
986 page_pool_return_page(pool, page); in page_pool_update_nid()
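An illustrative caller, as some drivers do from their NAPI/IRQ affinity hooks to keep allocations NUMA-local; reading pool->p.nid to skip redundant updates is an assumption about driver style, not a requirement:

	/* Illustrative affinity hook: future pool allocations follow the new node. */
	static void mydrv_pool_set_node(struct page_pool *pool, int new_nid)
	{
		if (pool->p.nid != new_nid)
			page_pool_update_nid(pool, new_nid);
	}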