Lines Matching +full:page +full:-size
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2018-2020 Christoph Hellwig.
10 #include <linux/dma-map-ops.h>
21 * override the variable below for dma-direct to work properly.
33 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page()
41 phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT; in dma_direct_get_required_mask()
44 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; in dma_direct_get_required_mask()
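As a hedged worked example (assuming 4 KiB pages and an identity CPU/DMA mapping, neither of which is stated in these matched lines): with 4 GiB of RAM, max_pfn is 0x100000, so the last page starts at phys 0xFFFFF000; fls64() of that is 32, and the computed required mask is (1ULL << 31) * 2 - 1 = 0xFFFFFFFF, i.e. DMA_BIT_MASK(32).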
50 u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit); in dma_direct_optimal_gfp_mask()
68 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
74 return dma_addr + size - 1 <= in dma_coherent_ok()
75 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); in dma_coherent_ok()
78 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages() argument
82 struct page *page = NULL; in __dma_direct_alloc_pages() local
85 WARN_ON_ONCE(!PAGE_ALIGNED(size)); in __dma_direct_alloc_pages()
87 gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, in __dma_direct_alloc_pages()
89 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
90 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
91 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
92 page = NULL; in __dma_direct_alloc_pages()
95 if (!page) in __dma_direct_alloc_pages()
96 page = alloc_pages_node(node, gfp, get_order(size)); in __dma_direct_alloc_pages()
97 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
98 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
99 page = NULL; in __dma_direct_alloc_pages()
114 return page; in __dma_direct_alloc_pages()
117 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size, in dma_direct_alloc_from_pool() argument
120 struct page *page; in dma_direct_alloc_from_pool() local
124 gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, in dma_direct_alloc_from_pool()
126 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok); in dma_direct_alloc_from_pool()
127 if (!page) in dma_direct_alloc_from_pool()
129 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_from_pool()
133 void *dma_direct_alloc(struct device *dev, size_t size, in dma_direct_alloc() argument
136 struct page *page; in dma_direct_alloc() local
140 size = PAGE_ALIGN(size); in dma_direct_alloc()
146 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO); in dma_direct_alloc()
147 if (!page) in dma_direct_alloc()
150 if (!PageHighMem(page)) in dma_direct_alloc()
151 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
152 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc()
153 /* return the page pointer as the opaque cookie */ in dma_direct_alloc()
154 return page; in dma_direct_alloc()
160 return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); in dma_direct_alloc()
170 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc()
173 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO); in dma_direct_alloc()
174 if (!page) in dma_direct_alloc()
179 (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) { in dma_direct_alloc()
181 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
184 ret = dma_common_contiguous_remap(page, size, in dma_direct_alloc()
191 1 << get_order(size)); in dma_direct_alloc()
195 memset(ret, 0, size); in dma_direct_alloc()
199 if (PageHighMem(page)) { in dma_direct_alloc()
201 * Depending on the cma= arguments and per-arch setup in dma_direct_alloc()
206 dev_info(dev, "Rejecting highmem page from CMA.\n"); in dma_direct_alloc()
210 ret = page_address(page); in dma_direct_alloc()
213 1 << get_order(size)); in dma_direct_alloc()
218 memset(ret, 0, size); in dma_direct_alloc()
222 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
223 ret = arch_dma_set_uncached(ret, size); in dma_direct_alloc()
228 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc()
233 err = set_memory_encrypted((unsigned long)page_address(page), in dma_direct_alloc()
234 1 << get_order(size)); in dma_direct_alloc()
235 /* If memory cannot be re-encrypted, it must be leaked */ in dma_direct_alloc()
240 dma_free_contiguous(dev, page, size); in dma_direct_alloc()
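Drivers never call dma_direct_alloc() themselves; they go through dma_alloc_coherent()/dma_free_coherent() from <linux/dma-mapping.h>, which dispatch here when the device is direct-mapped. A minimal, hypothetical probe-path sketch (my_dev, the 32-bit mask and the one-page size are assumptions, not taken from this file):

	void *cpu;
	dma_addr_t handle;

	if (dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(32)))
		return -EIO;

	cpu = dma_alloc_coherent(my_dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* Hand "handle" to the hardware; the CPU uses "cpu" directly. */

	dma_free_coherent(my_dev, PAGE_SIZE, cpu, handle);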
244 void dma_direct_free(struct device *dev, size_t size, in dma_direct_free() argument
247 unsigned int page_order = get_order(size); in dma_direct_free()
251 /* cpu_addr is a struct page cookie, not a kernel address */ in dma_direct_free()
252 dma_free_contiguous(dev, cpu_addr, size); in dma_direct_free()
259 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); in dma_direct_free()
265 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) in dma_direct_free()
274 arch_dma_clear_uncached(cpu_addr, size); in dma_direct_free()
276 dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size); in dma_direct_free()
279 struct page *dma_direct_alloc_pages(struct device *dev, size_t size, in dma_direct_alloc_pages() argument
282 struct page *page; in dma_direct_alloc_pages() local
287 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc_pages()
289 page = __dma_direct_alloc_pages(dev, size, gfp); in dma_direct_alloc_pages()
290 if (!page) in dma_direct_alloc_pages()
292 if (PageHighMem(page)) { in dma_direct_alloc_pages()
294 * Depending on the cma= arguments and per-arch setup in dma_direct_alloc_pages()
299 dev_info(dev, "Rejecting highmem page from CMA.\n"); in dma_direct_alloc_pages()
303 ret = page_address(page); in dma_direct_alloc_pages()
306 1 << get_order(size))) in dma_direct_alloc_pages()
309 memset(ret, 0, size); in dma_direct_alloc_pages()
310 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_pages()
311 return page; in dma_direct_alloc_pages()
313 dma_free_contiguous(dev, page, size); in dma_direct_alloc_pages()
317 void dma_direct_free_pages(struct device *dev, size_t size, in dma_direct_free_pages() argument
318 struct page *page, dma_addr_t dma_addr, in dma_direct_free_pages() argument
321 unsigned int page_order = get_order(size); in dma_direct_free_pages()
322 void *vaddr = page_address(page); in dma_direct_free_pages()
326 dma_free_from_pool(dev, vaddr, size)) in dma_direct_free_pages()
332 dma_free_contiguous(dev, page, size); in dma_direct_free_pages()
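dma_direct_alloc_pages()/dma_direct_free_pages() back the struct-page based dma_alloc_pages()/dma_free_pages() interface. A hedged usage sketch (the device, one-page size and bidirectional direction are assumptions):

	struct page *p;
	dma_addr_t handle;

	p = dma_alloc_pages(dev, PAGE_SIZE, &handle, DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* CPU access goes through page_address(p); the device uses "handle". */

	dma_free_pages(dev, PAGE_SIZE, p, handle, DMA_BIDIRECTIONAL);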
347 swiotlb_tbl_sync_single(dev, paddr, sg->length, in dma_direct_sync_sg_for_device()
351 arch_sync_dma_for_device(paddr, sg->length, in dma_direct_sync_sg_for_device()
370 arch_sync_dma_for_cpu(paddr, sg->length, dir); in dma_direct_sync_sg_for_cpu()
373 swiotlb_tbl_sync_single(dev, paddr, sg->length, dir, in dma_direct_sync_sg_for_cpu()
377 arch_dma_mark_clean(paddr, sg->length); in dma_direct_sync_sg_for_cpu()
391 dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir, in dma_direct_unmap_sg()
403 sg->dma_address = dma_direct_map_page(dev, sg_page(sg), in dma_direct_map_sg()
404 sg->offset, sg->length, dir, attrs); in dma_direct_map_sg()
405 if (sg->dma_address == DMA_MAPPING_ERROR) in dma_direct_map_sg()
407 sg_dma_len(sg) = sg->length; in dma_direct_map_sg()
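The streaming helpers above sit behind dma_map_sg()/dma_unmap_sg() and the dma_sync_sg_* calls. A hedged sketch of a typical transmit sequence (the populated sg_table "sgt" and DMA_TO_DEVICE direction are assumptions):

	int nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);

	if (!nents)
		return -EIO;

	/* Program the device with sg_dma_address()/sg_dma_len() of each entry. */

	/* If the CPU rewrites the buffers before another transfer: */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);

	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);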
418 size_t size, enum dma_data_direction dir, unsigned long attrs) in dma_direct_map_resource() argument
422 if (unlikely(!dma_capable(dev, dma_addr, size, false))) { in dma_direct_map_resource()
425 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in dma_direct_map_resource()
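dma_direct_map_resource() implements dma_map_resource(), which maps MMIO (for example a peer device's BAR) rather than RAM, so no struct page is involved. A hedged sketch (bar_phys and the 64 KiB length are assumptions):

	dma_addr_t bus = dma_map_resource(dev, bar_phys, SZ_64K,
					  DMA_FROM_DEVICE, 0);

	if (dma_mapping_error(dev, bus))
		return -EIO;

	/* Let the peer device write into the window at "bus". */

	dma_unmap_resource(dev, bus, SZ_64K, DMA_FROM_DEVICE, 0);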
434 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_direct_get_sgtable() argument
437 struct page *page = dma_direct_to_page(dev, dma_addr); in dma_direct_get_sgtable() local
442 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_direct_get_sgtable()
453 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_direct_mmap() argument
457 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_direct_mmap()
459 int ret = -ENXIO; in dma_direct_mmap()
461 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_direct_mmap()
463 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_direct_mmap()
466 if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff) in dma_direct_mmap()
467 return -ENXIO; in dma_direct_mmap()
468 return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, in dma_direct_mmap()
469 user_count << PAGE_SHIFT, vma->vm_page_prot); in dma_direct_mmap()
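dma_direct_mmap() is what dma_mmap_coherent()/dma_mmap_attrs() resolve to for direct-mapped devices. A hedged sketch of a driver mmap handler exposing a coherent buffer allocated earlier (my_dev, buf and buf_dma are assumptions):

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		return dma_mmap_coherent(my_dev, vma, buf, buf_dma, PAGE_SIZE);
	}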
474 u64 min_mask = (max_pfn - 1) << PAGE_SHIFT; in dma_direct_supported()
477 * Because 32-bit DMA masks are so common we expect every architecture in dma_direct_supported()
478 * to be able to satisfy them - either by not supporting more physical in dma_direct_supported()
497 /* If SWIOTLB is active, use its maximum mapping size */ in dma_direct_max_mapping_size()
511 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
515 * @size: size of the region.
518 * be discovered by "dma-ranges".
520 * It returns -ENOMEM if out of memory, -EINVAL if a map
527 dma_addr_t dma_start, u64 size) in dma_direct_set_offset() argument
530 u64 offset = (u64)cpu_start - (u64)dma_start; in dma_direct_set_offset()
532 if (dev->dma_range_map) { in dma_direct_set_offset()
534 return -EINVAL; in dma_direct_set_offset()
542 return -ENOMEM; in dma_direct_set_offset()
546 map[0].size = size; in dma_direct_set_offset()
547 dev->dma_range_map = map; in dma_direct_set_offset()
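Platform or bus glue calls dma_direct_set_offset() when the device and the CPU disagree on where RAM sits and the translation cannot be discovered from "dma-ranges". A hedged sketch (the 1 GiB window and the 0x40000000 CPU offset are assumptions):

	/* Device issues bus address 0x0 for RAM the CPU sees at 0x40000000. */
	int ret = dma_direct_set_offset(dev, 0x40000000, 0x00000000, SZ_1G);

	if (ret)
		dev_warn(dev, "failed to set DMA offset: %d\n", ret);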