Lines matching "dma-safe-map"

1 /* SPDX-License-Identifier: GPL-2.0
12 * uses one-frame-per-page, but has fallbacks that act like the
19 * API keeps track of in-flight pages, in order to let the API user know
20 * when it is safe to deallocate the page_pool object. Thus, API users
27 * will release the DMA mapping and in-flight state accounting. We
35 #include <linux/dma-direction.h>
37 #define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
38 * map/unmap
42 * DMA-synced-for-device according to
45 * Please note DMA-sync-for-CPU is still
54 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
58 * Keeping room for more objects is due to the XDP_DROP use-case. As
76 struct device *dev; /* device, for DMA pre-mapping purposes */
77 enum dma_data_direction dma_dir; /* DMA mapping direction */
78 unsigned int max_len; /* max DMA sync memory size */
79 unsigned int offset; /* DMA addr offset */
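
Taken together, the PP_FLAG_* fragments and the page_pool_params fields above (dev, dma_dir, max_len, offset) describe what a driver hands over to the pool at creation time. A minimal, hypothetical setup sketch, not taken from this header; the ring size and the zero headroom offset are placeholder values a real driver would choose itself:

#include <linux/numa.h>
#include <linux/dma-direction.h>
#include <net/page_pool.h>

/* Illustrative RX-queue setup: one pool per RX-queue. */
static struct page_pool *example_rxq_pool_create(struct device *dma_dev,
						 unsigned int ring_size)
{
	struct page_pool_params pp_params = {
		/* Let page_pool own DMA map/unmap and sync-for-device */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= ring_size,	/* room for a full RX ring */
		.nid		= NUMA_NO_NODE,	/* allocate on the local node */
		.dev		= dma_dev,	/* device, for DMA pre-mapping */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* max DMA-sync-for-device size */
		.offset		= 0,		/* DMA addr offset (headroom) */
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}
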
100 * RX-queue. As the RX-queue is already protected by
114 * efficiently, in a way that doesn't bounce cache-lines.
122 /* A page_pool is strictly tied to a single RX-queue being
140 /* get the stored dma direction. A driver might decide to treat this locally and
146 return pool->p.dma_dir; in page_pool_get_dma_dir()
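
A small sketch of the "treat this locally" idea from the comment at line 140; example_rx_ring and its fields are invented for illustration:

#include <net/page_pool.h>

/* Hypothetical per-ring state; only the fields used here are shown. */
struct example_rx_ring {
	struct page_pool *page_pool;
	enum dma_data_direction dma_dir;
};

/* Cache the direction configured at pool-create time so the hot path
 * avoids dereferencing pool->p for it.
 */
static void example_rx_ring_cache_dma_dir(struct example_rx_ring *rq)
{
	rq->dma_dir = page_pool_get_dma_dir(rq->page_pool);
}
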
173 /* Same as above but will try to sync the entire area pool->max_len */
177 /* When page_pool isn't compiled-in, net/core/xdp.c doesn't in page_pool_put_full_page()
181 page_pool_put_page(pool, page, -1, allow_direct); in page_pool_put_full_page()
185 /* Same as above but the caller must guarantee safe context, e.g. NAPI */
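
How the allocation and the two put/recycle variants around lines 173-185 are meant to be called, as a hedged sketch; the driver-side function names are made up:

#include <net/page_pool.h>

/* Allocate one page for an RX descriptor; may return NULL under memory
 * pressure, so callers must handle that.
 */
static struct page *example_rx_alloc(struct page_pool *pool)
{
	return page_pool_dev_alloc_pages(pool);
}

/* Release a frame outside guaranteed NAPI context: the pool syncs the
 * whole pool->max_len area (line 173) and recycles the page or, if it
 * cannot, releases the DMA mapping.
 */
static void example_rx_drop(struct page_pool *pool, struct page *page)
{
	page_pool_put_full_page(pool, page, false);
}

/* Release a frame from inside the NAPI poll loop itself, the "safe
 * context" the comment at line 185 asks for.
 */
static void example_rx_drop_napi(struct page_pool *pool, struct page *page)
{
	page_pool_recycle_direct(pool, page);
}
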
194 return page->dma_addr; in page_pool_get_dma_addr()
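
The stored address returned by page_pool_get_dma_addr() (line 194) pairs with the note at line 45 that sync-for-CPU remains the driver's responsibility; a hedged sketch where dev, offset and len come from the driver and its descriptor:

#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* Sync just the received frame before the CPU reads it. */
static void example_rx_sync_for_cpu(struct device *dev, struct page *page,
				    unsigned int offset, unsigned int len)
{
	dma_addr_t dma = page_pool_get_dma_addr(page);

	dma_sync_single_for_cpu(dev, dma + offset, len, DMA_FROM_DEVICE);
}
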
208 return refcount_dec_and_test(&pool->user_cnt); in page_pool_put()
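
The refcount at line 208 exists because more than one user (typically the driver and the XDP memory-model registration) can hold the pool; a hedged teardown sketch, with ordering and naming illustrative rather than mandated by this header, and actual freeing deferred until in-flight pages are returned (lines 19-20):

#include <net/xdp.h>
#include <net/page_pool.h>

/* Unregister the XDP RX-queue info (dropping its reference), then drop
 * the driver's own reference to the pool.
 */
static void example_rxq_teardown(struct xdp_rxq_info *xdp_rxq,
				 struct page_pool *pool)
{
	xdp_rxq_info_unreg(xdp_rxq);
	page_pool_destroy(pool);
}
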
211 /* Caller must provide appropriate safe context, e.g. NAPI. */
215 if (unlikely(pool->p.nid != new_nid)) in page_pool_nid_changed()
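
A sketch of the intended caller for page_pool_nid_changed(); the surrounding NAPI poll, which provides the "safe context" line 211 requires, is assumed and not shown:

#include <linux/topology.h>
#include <net/page_pool.h>

/* If the IRQ (and thus the poll) has migrated to another node, steer
 * future page allocations to the now-local node.
 */
static void example_napi_update_pool_nid(struct page_pool *pool)
{
	page_pool_nid_changed(pool, numa_mem_id());
}
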