Lines Matching +full:page +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2018-2020 Christoph Hellwig.
10 #include <linux/dma-map-ops.h>
21 * override the variable below for dma-direct to work properly.
33 static inline struct page *dma_direct_to_page(struct device *dev,
41 phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
44 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
50 dev->coherent_dma_mask,
51 dev->bus_dma_limit);
69 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
75 return dma_addr + size - 1 <=
76 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
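
dma_coherent_ok() accepts a buffer only when its highest bus address fits under both the device's coherent DMA mask and any bus_dma_limit. A driver influences that check by declaring its addressing capability up front; below is a minimal, hypothetical probe-time sketch (driver name and 32-bit mask are assumptions, not taken from this file) using the generic dma_set_mask_and_coherent() helper.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe step: declare 32-bit addressing so later coherent
 * allocations can satisfy checks such as dma_coherent_ok(). */
static int mydrv_setup_dma(struct device *dev)
{
        int ret;

        /* Sets both dev->dma_mask and dev->coherent_dma_mask. */
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                dev_err(dev, "no usable DMA addressing\n");
        return ret;
}
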
79 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
83 return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
86 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
92 ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
94 pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
98 static void __dma_direct_free_pages(struct device *dev, struct page *page,
99 size_t size)
101 if (swiotlb_free(dev, page, size))
103 dma_free_contiguous(dev, page, size);
106 static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
108 struct page *page = swiotlb_alloc(dev, size);
110 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
111 swiotlb_free(dev, page, size);
115 return page;
118 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
122 struct page *page = NULL;
125 WARN_ON_ONCE(!PAGE_ALIGNED(size));
128 return dma_direct_alloc_swiotlb(dev, size);
131 page = dma_alloc_contiguous(dev, size, gfp);
132 if (page) {
133 if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
134 (!allow_highmem && PageHighMem(page))) {
135 dma_free_contiguous(dev, page, size);
136 page = NULL;
140 if (!page)
141 page = alloc_pages_node(node, gfp, get_order(size));
142 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
143 dma_free_contiguous(dev, page, size);
144 page = NULL;
159 return page;
171 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
174 struct page *page;
182 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
183 if (!page)
185 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
189 static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
192 struct page *page;
194 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
195 if (!page)
199 if (!PageHighMem(page))
200 arch_dma_prep_coherent(page, size);
202 /* return the page pointer as the opaque cookie */
203 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
204 return page;
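
dma_direct_alloc_no_mapping() backs the DMA_ATTR_NO_KERNEL_MAPPING case: the caller receives an opaque cookie (here the struct page pointer) instead of a usable kernel virtual address. A hedged driver-side sketch of that contract, with invented names, using the generic dma_alloc_attrs()/dma_free_attrs() API:

#include <linux/dma-mapping.h>

/* Hypothetical example: a buffer the CPU never touches, e.g. a device-only
 * scratch area. The returned pointer is an opaque cookie and must only be
 * handed back to dma_free_attrs(). */
static void *mydrv_alloc_device_only(struct device *dev, size_t size,
                                     dma_addr_t *dma)
{
        return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
                               DMA_ATTR_NO_KERNEL_MAPPING);
}

static void mydrv_free_device_only(struct device *dev, size_t size,
                                   void *cookie, dma_addr_t dma)
{
        dma_free_attrs(dev, size, cookie, dma, DMA_ATTR_NO_KERNEL_MAPPING);
}
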
207 void *dma_direct_alloc(struct device *dev, size_t size,
211 struct page *page;
214 size = PAGE_ALIGN(size);
220 return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
225 return arch_dma_alloc(dev, size, dma_handle, gfp,
230 * non-coherent devices.
233 return dma_alloc_from_global_coherent(dev, size,
255 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
258 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
259 if (!page)
264 * combination the cma= arguments and per-arch setup. These need to be
267 if (PageHighMem(page)) {
279 arch_dma_prep_coherent(page, size);
282 ret = dma_common_contiguous_remap(page, size, prot,
287 ret = page_address(page);
288 if (dma_set_decrypted(dev, ret, size))
292 memset(ret, 0, size);
295 arch_dma_prep_coherent(page, size);
296 ret = arch_dma_set_uncached(ret, size);
301 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
305 if (dma_set_encrypted(dev, page_address(page), size))
308 __dma_direct_free_pages(dev, page, size);
312 void dma_direct_free(struct device *dev, size_t size,
315 unsigned int page_order = get_order(size);
319 /* cpu_addr is a struct page cookie, not a kernel address */
320 dma_free_contiguous(dev, cpu_addr, size);
327 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
340 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
347 arch_dma_clear_uncached(cpu_addr, size);
348 if (dma_set_encrypted(dev, cpu_addr, size))
352 __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
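
For a device with no IOMMU, dma_direct_alloc()/dma_direct_free() are what ultimately service the generic coherent-allocation API. A minimal, hypothetical usage sketch (the ring name and size are invented) with dma_alloc_coherent()/dma_free_coherent():

#include <linux/dma-mapping.h>

#define MYDRV_RING_BYTES 4096   /* assumed example size */

/* Allocate a descriptor ring the device and CPU share coherently.
 * On direct-mapped systems this ends up in dma_direct_alloc(). */
static void *mydrv_alloc_ring(struct device *dev, dma_addr_t *dma)
{
        return dma_alloc_coherent(dev, MYDRV_RING_BYTES, dma, GFP_KERNEL);
}

static void mydrv_free_ring(struct device *dev, void *ring, dma_addr_t dma)
{
        dma_free_coherent(dev, MYDRV_RING_BYTES, ring, dma);
}
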
355 struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
358 struct page *page;
362 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
364 page = __dma_direct_alloc_pages(dev, size, gfp, false);
365 if (!page)
368 ret = page_address(page);
369 if (dma_set_decrypted(dev, ret, size))
371 memset(ret, 0, size);
372 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
373 return page;
375 __dma_direct_free_pages(dev, page, size);
379 void dma_direct_free_pages(struct device *dev, size_t size,
380 struct page *page, dma_addr_t dma_addr,
383 void *vaddr = page_address(page);
387 dma_free_from_pool(dev, vaddr, size))
390 if (dma_set_encrypted(dev, vaddr, size))
392 __dma_direct_free_pages(dev, page, size);
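
dma_direct_alloc_pages()/dma_direct_free_pages() back the page-based dma_alloc_pages() interface, which hands the caller a struct page plus a DMA address rather than a coherent kernel mapping. A hedged sketch (invented names; DMA_BIDIRECTIONAL chosen arbitrarily):

#include <linux/dma-mapping.h>

/* Hypothetical: grab one page for non-coherent, page-based use. */
static struct page *mydrv_get_dma_page(struct device *dev, dma_addr_t *dma)
{
        return dma_alloc_pages(dev, PAGE_SIZE, dma, DMA_BIDIRECTIONAL,
                               GFP_KERNEL);
}

static void mydrv_put_dma_page(struct device *dev, struct page *page,
                               dma_addr_t dma)
{
        dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_BIDIRECTIONAL);
}
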
407 swiotlb_sync_single_for_device(dev, paddr, sg->length,
411 arch_sync_dma_for_device(paddr, sg->length,
430 arch_sync_dma_for_cpu(paddr, sg->length, dir);
433 swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
437 arch_dma_mark_clean(paddr, sg->length);
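
These fragments are the per-entry cache maintenance done by dma_direct_sync_sg_for_{device,cpu}(). From a driver's point of view the ownership handoff looks like the following hedged sketch (the buffer and its mapping are assumed to exist already):

#include <linux/dma-mapping.h>

/* Hypothetical streaming buffer previously mapped with dma_map_single().
 * Hand it to the CPU, inspect it, then give ownership back to the device. */
static void mydrv_peek_rx(struct device *dev, void *buf, dma_addr_t dma,
                          size_t len)
{
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
        /* ... the CPU may now safely read buf ... */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}
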
458 dma_direct_unmap_page(dev, sg->dma_address,
487 ret = -EREMOTEIO;
492 sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
493 sg->offset, sg->length, dir, attrs);
494 if (sg->dma_address == DMA_MAPPING_ERROR) {
495 ret = -EIO;
498 sg_dma_len(sg) = sg->length;
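
dma_direct_map_sg() walks the scatterlist, maps each entry with dma_direct_map_page(), and fails the whole list on the first error. Drivers normally reach it through dma_map_sgtable(); a hedged sketch (sgt is assumed to be an already-populated struct sg_table):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical: map a prepared sg_table for device writes and walk the
 * resulting DMA segments. Unmap later with dma_unmap_sgtable(). */
static int mydrv_map_table(struct device *dev, struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i, ret;

        ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
        if (ret)
                return ret;     /* e.g. -EIO or -EREMOTEIO, as above */

        for_each_sgtable_dma_sg(sgt, sg, i)
                dev_dbg(dev, "seg %d: %pad + %u\n", i,
                        &sg_dma_address(sg), sg_dma_len(sg));
        return 0;
}
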
509 size_t size, enum dma_data_direction dir, unsigned long attrs)
513 if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
516 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
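
dma_direct_map_resource() handles MMIO/physical resources rather than RAM, so it skips bouncing and only verifies that the address is reachable by the device. A hedged driver-side sketch using dma_map_resource() (the peer window and its physical address are invented placeholders):

#include <linux/dma-mapping.h>

/* Hypothetical: expose a peer device's MMIO window (phys) to this device. */
static dma_addr_t mydrv_map_peer_window(struct device *dev,
                                        phys_addr_t phys, size_t len)
{
        dma_addr_t dma;

        dma = dma_map_resource(dev, phys, len, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, dma))
                return DMA_MAPPING_ERROR;
        return dma;
}
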
525 void *cpu_addr, dma_addr_t dma_addr, size_t size,
528 struct page *page = dma_direct_to_page(dev, dma_addr);
533 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
544 void *cpu_addr, dma_addr_t dma_addr, size_t size,
548 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
550 int ret = -ENXIO;
552 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
554 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
556 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
558 if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
561 if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
562 return -ENXIO;
563 return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
564 user_count << PAGE_SHIFT, vma->vm_page_prot);
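
dma_direct_mmap() is what ultimately remaps a coherent buffer into user space; drivers reach it from their own mmap handler via dma_mmap_coherent(). A hedged sketch (the private-data layout and names are invented):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct mydrv_buf {              /* hypothetical driver state */
        struct device *dev;
        void *cpu_addr;
        dma_addr_t dma_addr;
        size_t size;
};

/* file_operations::mmap handler exporting the coherent buffer to user space */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydrv_buf *buf = file->private_data;

        return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
                                 buf->dma_addr, buf->size);
}
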
569 u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
572 * Because 32-bit DMA masks are so common we expect every architecture
573 * to be able to satisfy them - either by not supporting more physical
604 for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
605 unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
608 start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
616 start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
624 if (!dev->dma_range_map)
632 /* If SWIOTLB is active, use its maximum mapping size */
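
The max-mapping-size hook lets a driver discover when SWIOTLB bouncing caps the size of a single streaming mapping. A hedged sketch of clamping a transfer size with dma_max_mapping_size() (the 64 KiB preference is an assumption):

#include <linux/dma-mapping.h>
#include <linux/minmax.h>
#include <linux/sizes.h>

#define MYDRV_PREFERRED_XFER    SZ_64K  /* assumed driver preference */

/* Clamp the per-transfer size to what the DMA layer can map at once. */
static size_t mydrv_xfer_size(struct device *dev)
{
        return min_t(size_t, MYDRV_PREFERRED_XFER,
                     dma_max_mapping_size(dev));
}
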
646 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
650 * @size: size of the region.
653 * be discovered by "dma-ranges".
655 * It returns -ENOMEM if out of memory, -EINVAL if a map
662 dma_addr_t dma_start, u64 size)
665 u64 offset = (u64)cpu_start - (u64)dma_start;
667 if (dev->dma_range_map) {
669 return -EINVAL;
677 return -ENOMEM;
680 map[0].size = size;
681 dev->dma_range_map = map;
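
dma_direct_set_offset() installs a single-entry dma_range_map for buses whose DMA addresses are offset from CPU physical addresses and that cannot describe this via "dma-ranges". A hedged sketch of a platform/bus setup call (all addresses and the size are invented placeholders):

#include <linux/dma-map-ops.h>
#include <linux/sizes.h>

/* Hypothetical platform fixup: the bus sees RAM at 0x0 while the CPU sees
 * it at 0x80000000, for the first 1 GiB. */
static int myplat_fixup_dma(struct device *dev)
{
        return dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_1G);
}
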