Lines Matching defs:gfp
120 gfp_t gfp, bool allow_highmem)
131 gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
132 page = dma_alloc_contiguous(dev, size, gfp);
142 page = alloc_pages_node(node, gfp, get_order(size));
149 !(gfp & (GFP_DMA32 | GFP_DMA))) {
150 gfp |= GFP_DMA32;
154 if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
155 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
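
The fragments above (lines 120-155) track __dma_direct_alloc_pages(), which in recent mainline lives in kernel/dma/direct.c (an assumption from the dma_direct_* names; exact line numbers vary by version): widen the mask toward the device's optimal zone, try the contiguous (CMA) allocator, then plain pages, check the result against the device's addressing limit, and on failure retry with a more restrictive zone modifier, first GFP_DMA32, then GFP_DMA with GFP_DMA32 cleared so the two are never combined. A minimal userspace sketch of that escalation pattern follows; every name in it is an illustrative stand-in, not the kernel's code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel's zone modifier flags. */
#define XGFP_DMA32 (1u << 0)
#define XGFP_DMA   (1u << 1)

/* Stand-in for dma_coherent_ok(): buffer addressable by the device? */
static bool fits_mask(uint64_t phys, uint64_t dma_mask)
{
    return phys <= dma_mask;
}

/* Pretend page allocator: stricter zone flags yield lower addresses. */
static uint64_t fake_alloc(unsigned int gfp)
{
    if (gfp & XGFP_DMA)
        return (uint64_t)rand() & 0x00ffffff;   /* below 16 MiB */
    if (gfp & XGFP_DMA32)
        return (uint64_t)rand() & 0xffffffff;   /* below 4 GiB */
    return (uint64_t)rand() << 16;              /* anywhere */
}

/*
 * The retry ladder from lines 142-155: allocate, test against the
 * device limit, escalate to a more constrained zone on failure.
 * GFP_DMA32 is cleared before GFP_DMA is set; never both at once.
 */
static uint64_t alloc_for_device(uint64_t dma_mask, unsigned int gfp)
{
    uint64_t phys;
again:
    phys = fake_alloc(gfp);
    if (!fits_mask(phys, dma_mask)) {
        if (!(gfp & (XGFP_DMA32 | XGFP_DMA))) {
            gfp |= XGFP_DMA32;                  /* retry below 4 GiB */
            goto again;
        }
        if (!(gfp & XGFP_DMA)) {
            gfp = (gfp & ~XGFP_DMA32) | XGFP_DMA;   /* last resort */
            goto again;
        }
        return 0;   /* even the lowest zone could not satisfy the mask */
    }
    return phys;
}

int main(void)
{
    uint64_t phys = alloc_for_device(0xffffffffULL, 0);
    printf("allocated at 0x%llx\n", (unsigned long long)phys);
    return 0;
}

The kernel additionally gates each step on IS_ENABLED(CONFIG_ZONE_DMA32)/IS_ENABLED(CONFIG_ZONE_DMA) and on the computed phys_limit, which the sketch leaves out.
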
165 * pools for the given device/gfp.
167 static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
169 return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
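
dma_direct_use_pool() (lines 167-169, shown in full above) is the single gate for the pre-populated atomic pools: take the pool path only when the gfp mask forbids blocking and the device is not routed through swiotlb for allocations anyway. In mainline, gfpflags_allow_blocking() is simply a test for __GFP_DIRECT_RECLAIM, which GFP_KERNEL carries and GFP_ATOMIC does not. A small self-contained sketch of the same gate, with stand-in flag values and helpers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit; the real one is in include/linux/gfp_types.h. */
#define X__GFP_DIRECT_RECLAIM (1u << 0)
#define XGFP_KERNEL  X__GFP_DIRECT_RECLAIM   /* caller may sleep */
#define XGFP_ATOMIC  0u                      /* caller may not sleep */

/* Mirrors gfpflags_allow_blocking(): blocking == direct reclaim ok. */
static bool allow_blocking(unsigned int gfp)
{
    return gfp & X__GFP_DIRECT_RECLAIM;
}

/* Stand-in for is_swiotlb_for_alloc(): assume no forced bouncing. */
static bool swiotlb_for_alloc(void)
{
    return false;
}

/* The gate from dma_direct_use_pool(). */
static bool use_pool(unsigned int gfp)
{
    return !allow_blocking(gfp) && !swiotlb_for_alloc();
}

int main(void)
{
    printf("GFP_KERNEL uses pool: %d\n", use_pool(XGFP_KERNEL));  /* 0 */
    printf("GFP_ATOMIC uses pool: %d\n", use_pool(XGFP_ATOMIC));  /* 1 */
    return 0;
}
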
173 dma_addr_t *dma_handle, gfp_t gfp)
182 gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
183 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
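
dma_direct_alloc_from_pool() (lines 173-183) first widens the gfp mask toward the device's optimal zone, then calls dma_alloc_from_pool() with dma_coherent_ok passed as a callback, so the generic pool code can reject any pool whose memory the device cannot address. The callback-validated pool lookup is the reusable idea; a standalone sketch under that reading, with made-up pools, addresses and names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Two pretend pools at different "physical" bases. */
static const uint64_t pool_base[] = { 0x100000000ULL, 0x00200000ULL };

typedef bool (*phys_ok_fn)(uint64_t phys, size_t size, uint64_t dma_mask);

/* Stand-in for dma_coherent_ok(): whole buffer fits under the mask. */
static bool coherent_ok(uint64_t phys, size_t size, uint64_t dma_mask)
{
    return phys + size - 1 <= dma_mask;
}

/* Stand-in for dma_alloc_from_pool(): hand out the first pool block
 * the caller-supplied predicate accepts. */
static uint64_t pool_alloc(size_t size, uint64_t dma_mask, phys_ok_fn ok)
{
    for (size_t i = 0; i < sizeof(pool_base) / sizeof(pool_base[0]); i++)
        if (ok(pool_base[i], size, dma_mask))
            return pool_base[i];
    return 0;
}

int main(void)
{
    /* A 32-bit-limited device skips the pool above 4 GiB. */
    uint64_t got = pool_alloc(4096, 0xffffffffULL, coherent_ok);
    printf("pool block at 0x%llx\n", (unsigned long long)got);
    return 0;
}
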
191 dma_addr_t *dma_handle, gfp_t gfp)
195 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
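
dma_direct_alloc_no_mapping() (lines 191-195) backs DMA_ATTR_NO_KERNEL_MAPPING: the caller promises never to touch the buffer through a kernel virtual address, so highmem is acceptable (the final true argument) and what the caller gets back serves only as an opaque cookie to pass to the free routine. __GFP_ZERO is stripped here too, consistent with line 259 below, where dma-direct keeps zeroing under its own control rather than the page allocator's. The opaque-cookie pattern, as a hedged userspace sketch with invented names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Opaque cookie: callers can hold it and pass it back, but the type is
 * never defined for them, so they cannot dereference the buffer. This
 * mirrors the DMA_ATTR_NO_KERNEL_MAPPING contract (illustrative only). */
struct buf_cookie;

static struct buf_cookie *buf_alloc_no_mapping(size_t size, uint64_t *bus_addr)
{
    void *raw = malloc(size);

    if (!raw)
        return NULL;
    /* Pretend bus address; a real allocator would translate phys->dma. */
    *bus_addr = (uint64_t)(uintptr_t)raw;
    return (struct buf_cookie *)raw;    /* opaque to the caller */
}

static void buf_free(struct buf_cookie *cookie)
{
    free(cookie);
}

int main(void)
{
    uint64_t bus;
    struct buf_cookie *c = buf_alloc_no_mapping(4096, &bus);

    if (!c)
        return 1;
    printf("device-only buffer at bus 0x%llx\n", (unsigned long long)bus);
    buf_free(c);
    return 0;
}
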
209 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
217 gfp |= __GFP_NOWARN;
221 return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
226 return arch_dma_alloc(dev, size, dma_handle, gfp,
255 dma_direct_use_pool(dev, gfp))
256 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
259 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
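
Lines 209-259 trace dma_direct_alloc(), the top-level entry point, as a dispatch ladder: honor DMA_ATTR_NO_WARN by adding __GFP_NOWARN, short-circuit to the no-mapping variant, defer to arch_dma_alloc() where the architecture owns coherent allocations, fall back to the atomic pool when remapping or decrypting the buffer might block but the caller cannot, and otherwise allocate fresh pages with __GFP_ZERO stripped (the buffer is zeroed manually once the mapping decisions are made). A condensed sketch of that ordering; the predicates are stand-ins, not the kernel's exact conditions:

#include <stdbool.h>
#include <stdio.h>

enum path { PATH_NO_MAPPING, PATH_ARCH, PATH_ATOMIC_POOL, PATH_PAGES };

struct alloc_ctx {
    bool attr_no_kernel_mapping;  /* DMA_ATTR_NO_KERNEL_MAPPING set */
    bool arch_owns_alloc;         /* the arch_dma_alloc() case */
    bool needs_remap_or_decrypt;  /* work that may sleep */
    bool may_block;               /* gfpflags_allow_blocking(gfp) */
};

/* Ordering mirrors the fragments at lines 221, 226, 255-256 and 259. */
static enum path pick_path(const struct alloc_ctx *c)
{
    if (c->attr_no_kernel_mapping)
        return PATH_NO_MAPPING;
    if (c->arch_owns_alloc)
        return PATH_ARCH;
    if (c->needs_remap_or_decrypt && !c->may_block)
        return PATH_ATOMIC_POOL;
    return PATH_PAGES;  /* __GFP_ZERO stripped; zeroed after mapping */
}

int main(void)
{
    struct alloc_ctx atomic_caller = {
        .needs_remap_or_decrypt = true,
        .may_block = false,
    };

    printf("path = %d (2 == atomic pool)\n", pick_path(&atomic_caller));
    return 0;
}
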
359 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
364 if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
365 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
367 page = __dma_direct_alloc_pages(dev, size, gfp, false);
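
dma_direct_alloc_pages() (lines 359-367) backs the dma_alloc_pages() API. Two details differ from the coherent path above: memory that must be decrypted for the device goes through the atomic pool under the same non-blocking rule, and allow_highmem is false, since callers address the result via page_address() and the pages must therefore stay in the direct map. A hedged driver-side usage sketch follows; this is kernel code, not standalone, and the helper name and sizes are invented for illustration:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/* Illustrative helper, not from the kernel tree. */
static int example_alloc_ring(struct device *dev)
{
    dma_addr_t dma;
    struct page *page;

    /* GFP_KERNEL allows blocking, so this takes the page path and may
     * walk the GFP_DMA32/GFP_DMA retry ladder shown earlier. */
    page = dma_alloc_pages(dev, PAGE_SIZE, &dma, DMA_BIDIRECTIONAL,
                           GFP_KERNEL);
    if (!page)
        return -ENOMEM;

    /* Lowmem is guaranteed (allow_highmem == false), so this is valid. */
    pr_info("ring at va %p, dma %pad\n", page_address(page), &dma);

    dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_BIDIRECTIONAL);
    return 0;
}
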