Lines Matching +full:dma +full:- +full:coherent
1 // SPDX-License-Identifier: GPL-2.0
3 * arch-independent dma-mapping routines
10 #include <linux/dma-map-ops.h>
20 * Managed DMA API
33 dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, in dmam_release()
34 this->attrs); in dmam_release()
41 if (this->vaddr == match->vaddr) { in dmam_match()
42 WARN_ON(this->size != match->size || in dmam_match()
43 this->dma_handle != match->dma_handle); in dmam_match()
50 * dmam_free_coherent - Managed dma_free_coherent()
51 * @dev: Device to free coherent memory for
54 * @dma_handle: DMA handle of the memory to free
69 * dmam_alloc_attrs - Managed dma_alloc_attrs()
72 * @dma_handle: Out argument for allocated DMA handle
98 dr->vaddr = vaddr; in dmam_alloc_attrs()
99 dr->dma_handle = *dma_handle; in dmam_alloc_attrs()
100 dr->size = size; in dmam_alloc_attrs()
101 dr->attrs = attrs; in dmam_alloc_attrs()
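The managed (devres) allocator above registers dmam_release() so the buffer is torn down automatically when the driver detaches. A minimal sketch of how a driver might use it, assuming a hypothetical foo_probe() and ring size:

#include <linux/device.h>
#include <linux/dma-mapping.h>

#define FOO_RING_BYTES 4096                     /* hypothetical size */

static int foo_probe(struct device *dev)        /* hypothetical driver */
{
        dma_addr_t ring_dma;
        void *ring;

        /* dmam_alloc_coherent() is the convenience wrapper for dmam_alloc_attrs(). */
        ring = dmam_alloc_coherent(dev, FOO_RING_BYTES, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* Freed by devres on unbind; dmam_free_coherent() is only for early release. */
        return 0;
}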
115 if (dev->dma_ops_bypass) in dma_go_direct()
116 return min_not_zero(mask, dev->bus_dma_limit) >= in dma_go_direct()
124 * Check if the device uses a direct mapping for streaming DMA operations.
125 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
131 return dma_go_direct(dev, dev->coherent_dma_mask, ops); in dma_alloc_direct()
137 return dma_go_direct(dev, *dev->dma_mask, ops); in dma_map_direct()
149 if (WARN_ON_ONCE(!dev->dma_mask)) in dma_map_page_attrs()
155 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
170 else if (ops->unmap_page) in dma_unmap_page_attrs()
171 ops->unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
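A hedged sketch of the streaming single-page path handled by dma_map_page_attrs()/dma_unmap_page_attrs() above; the foo_* helpers and the DMA_FROM_DEVICE direction are assumptions for illustration:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical helper: map one page for a device-to-memory transfer. */
static dma_addr_t foo_map_rx_page(struct device *dev, struct page *page)
{
        dma_addr_t addr;

        addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, addr))
                return DMA_MAPPING_ERROR;
        return addr;
}

static void foo_unmap_rx_page(struct device *dev, dma_addr_t addr)
{
        dma_unmap_page(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
}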
188 if (WARN_ON_ONCE(!dev->dma_mask)) in dma_map_sg_attrs()
194 ents = ops->map_sg(dev, sg, nents, dir, attrs); in dma_map_sg_attrs()
212 else if (ops->unmap_sg) in dma_unmap_sg_attrs()
213 ops->unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
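For the scatterlist path above, a sketch (hypothetical foo_* names) of the usual calling convention; note that dma_map_sg() may coalesce entries, while the unmap uses the original nents:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_map_sgl(struct device *dev, struct scatterlist *sgl, int nents)
{
        int mapped;

        mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
        if (!mapped)
                return -EIO;            /* dma_map_sg() returns 0 on failure */

        /*
         * Program the device from sg_dma_address()/sg_dma_len() over the
         * 'mapped' entries, which may be fewer than 'nents'.
         */
        return mapped;
}

static void foo_unmap_sgl(struct device *dev, struct scatterlist *sgl, int nents)
{
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);   /* original nents */
}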
225 if (WARN_ON_ONCE(!dev->dma_mask)) in dma_map_resource()
234 else if (ops->map_resource) in dma_map_resource()
235 addr = ops->map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
248 if (!dma_map_direct(dev, ops) && ops->unmap_resource) in dma_unmap_resource()
249 ops->unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
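dma_map_resource() above is for physical addresses that are not RAM, for example a peer device's MMIO window. A hedged sketch, with the source of phys_addr/len left as an assumption:

#include <linux/dma-mapping.h>

static dma_addr_t foo_map_mmio(struct device *dev, phys_addr_t phys_addr,
                               size_t len)
{
        dma_addr_t addr;

        addr = dma_map_resource(dev, phys_addr, len, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, addr))
                return DMA_MAPPING_ERROR;
        return addr;
}

static void foo_unmap_mmio(struct device *dev, dma_addr_t addr, size_t len)
{
        dma_unmap_resource(dev, addr, len, DMA_BIDIRECTIONAL, 0);
}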
262 else if (ops->sync_single_for_cpu) in dma_sync_single_for_cpu()
263 ops->sync_single_for_cpu(dev, addr, size, dir); in dma_sync_single_for_cpu()
276 else if (ops->sync_single_for_device) in dma_sync_single_for_device()
277 ops->sync_single_for_device(dev, addr, size, dir); in dma_sync_single_for_device()
290 else if (ops->sync_sg_for_cpu) in dma_sync_sg_for_cpu()
291 ops->sync_sg_for_cpu(dev, sg, nelems, dir); in dma_sync_sg_for_cpu()
304 else if (ops->sync_sg_for_device) in dma_sync_sg_for_device()
305 ops->sync_sg_for_device(dev, sg, nelems, dir); in dma_sync_sg_for_device()
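The sync helpers above transfer buffer ownership between device and CPU for long-lived streaming mappings. A minimal sketch, assuming a hypothetical reusable DMA_FROM_DEVICE buffer:

#include <linux/dma-mapping.h>

static void foo_process_rx(struct device *dev, dma_addr_t buf_dma,
                           void *buf, size_t len)
{
        dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);

        /* ... the CPU may now read 'buf' ... */

        dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
}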
311 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
313 * coherent DMA APIs through the dma_buf API, which only accepts a
315 * 1. Not all memory allocated via the coherent DMA APIs is backed by
317 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
330 if (!ops->get_sgtable) in dma_get_sgtable_attrs()
331 return -ENXIO; in dma_get_sgtable_attrs()
332 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); in dma_get_sgtable_attrs()
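For completeness, a hypothetical exporter-style wrapper around the interface the comment above discourages; dma_get_sgtable() is the attrs = 0 form of dma_get_sgtable_attrs():

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_export_sgt(struct device *dev, struct sg_table *sgt,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        /*
         * Only valid for memory obtained from dma_alloc_coherent() and
         * friends; see the warning above about struct page backing.
         */
        return dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}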
356 * dma_can_mmap - check if a given device supports dma_mmap_*
360 * map DMA allocations to userspace.
368 return ops->mmap != NULL; in dma_can_mmap()
373 * dma_mmap_attrs - map a coherent DMA allocation into user space
374 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
376 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
377 * @dma_addr: device-view address returned from dma_alloc_attrs
381 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
382 * space. The coherent DMA buffer must not be freed by the driver until the
394 if (!ops->mmap) in dma_mmap_attrs()
395 return -ENXIO; in dma_mmap_attrs()
396 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
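A hedged sketch of a character-device mmap handler built on dma_can_mmap() and dma_mmap_coherent() (the attrs = 0 wrapper for dma_mmap_attrs()); struct foo_buf and its lifetime are assumptions:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct foo_buf {                        /* hypothetical per-open state */
        struct device *dev;
        void *cpu_addr;
        dma_addr_t dma_addr;
        size_t size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct foo_buf *buf = file->private_data;

        if (!dma_can_mmap(buf->dev))
                return -ENXIO;

        return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
                                 buf->dma_addr, buf->size);
}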
406 if (ops->get_required_mask) in dma_get_required_mask()
407 return ops->get_required_mask(dev); in dma_get_required_mask()
410 * We require every DMA ops implementation to at least support a 32-bit in dma_get_required_mask()
411 * DMA mask (and use bounce buffering if that isn't supported in in dma_get_required_mask()
413 * actually report an optimal mask we default to 32-bit here as that in dma_get_required_mask()
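One plausible (assumed) use of dma_get_required_mask(): only switch a device into its 64-bit addressing mode when the platform actually has memory above the 32-bit boundary:

#include <linux/dma-mapping.h>

static bool foo_wants_64bit_dma(struct device *dev)
{
        return dma_get_required_mask(dev) > DMA_BIT_MASK(32);
}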
427 WARN_ON_ONCE(!dev->coherent_dma_mask); in dma_alloc_attrs()
437 else if (ops->alloc) in dma_alloc_attrs()
438 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
455 * On non-coherent platforms which implement DMA-coherent buffers via in dma_free_attrs()
456 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting in dma_free_attrs()
459 * probably misusing the coherent API anyway. in dma_free_attrs()
469 else if (ops->free) in dma_free_attrs()
470 ops->free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
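The unmanaged counterpart to the devres example earlier: a sketch pairing dma_alloc_coherent() (the convenience wrapper for dma_alloc_attrs()) with an explicit free on teardown; the foo_* names are hypothetical:

#include <linux/dma-mapping.h>

static int foo_setup_desc_ring(struct device *dev, size_t size,
                               void **ring, dma_addr_t *ring_dma)
{
        *ring = dma_alloc_coherent(dev, size, ring_dma, GFP_KERNEL);
        if (!*ring)
                return -ENOMEM;
        return 0;
}

static void foo_free_desc_ring(struct device *dev, size_t size,
                               void *ring, dma_addr_t ring_dma)
{
        /* Not from interrupt context, per the vunmap() note above. */
        dma_free_coherent(dev, size, ring, ring_dma);
}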
480 if (WARN_ON_ONCE(!dev->coherent_dma_mask)) in dma_alloc_pages()
488 else if (ops->alloc_pages) in dma_alloc_pages()
489 page = ops->alloc_pages(dev, size, dma_handle, dir, gfp); in dma_alloc_pages()
509 else if (ops->free_pages) in dma_free_pages()
510 ops->free_pages(dev, size, page, dma_handle, dir); in dma_free_pages()
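A minimal sketch of the page-based allocator above, assuming the driver touches the memory through page_address() while the device uses the returned handle:

#include <linux/dma-mapping.h>

static struct page *foo_alloc_dma_pages(struct device *dev, size_t size,
                                        dma_addr_t *pgs_dma)
{
        return dma_alloc_pages(dev, size, pgs_dma, DMA_BIDIRECTIONAL,
                               GFP_KERNEL);
}

static void foo_free_dma_pages(struct device *dev, size_t size,
                               struct page *page, dma_addr_t pgs_dma)
{
        dma_free_pages(dev, size, page, pgs_dma, DMA_BIDIRECTIONAL);
}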
520 if (!ops || !ops->alloc_noncoherent) { in dma_alloc_noncoherent()
530 vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp); in dma_alloc_noncoherent()
543 if (!ops || !ops->free_noncoherent) { in dma_free_noncoherent()
550 ops->free_noncoherent(dev, size, vaddr, dma_handle, dir); in dma_free_noncoherent()
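A sketch of the non-coherent variant above; unlike the coherent API, every transfer must be bracketed by explicit dma_sync_single_for_{device,cpu}() calls (the direction and names are assumptions):

#include <linux/dma-mapping.h>

static void *foo_alloc_rx_area(struct device *dev, size_t size,
                               dma_addr_t *area_dma)
{
        return dma_alloc_noncoherent(dev, size, area_dma, DMA_FROM_DEVICE,
                                     GFP_KERNEL);
}

static void foo_free_rx_area(struct device *dev, size_t size, void *vaddr,
                             dma_addr_t area_dma)
{
        dma_free_noncoherent(dev, size, vaddr, area_dma, DMA_FROM_DEVICE);
}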
559 * ->dma_supported sets the bypass flag, so we must always call in dma_supported()
564 if (!ops->dma_supported) in dma_supported()
566 return ops->dma_supported(dev, mask); in dma_supported()
584 if (!dev->dma_mask || !dma_supported(dev, mask)) in dma_set_mask()
585 return -EIO; in dma_set_mask()
588 *dev->dma_mask = mask; in dma_set_mask()
603 return -EIO; in dma_set_coherent_mask()
605 dev->coherent_dma_mask = mask; in dma_set_coherent_mask()
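A common probe-time pattern built on dma_set_mask() and dma_set_coherent_mask(), sketched with the combined helper; the 64-then-32 fallback is an assumption about the hypothetical device:

#include <linux/dma-mapping.h>

static int foo_set_dma_masks(struct device *dev)
{
        /* dma_set_mask_and_coherent() sets both masks, returning 0 on success. */
        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}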
618 else if (ops && ops->max_mapping_size) in dma_max_mapping_size()
619 size = ops->max_mapping_size(dev); in dma_max_mapping_size()
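A small (assumed) consumer of dma_max_mapping_size(), clamping a hardware limit to what a single mapping can cover, e.g. when sizing I/O requests:

#include <linux/kernel.h>
#include <linux/dma-mapping.h>

static size_t foo_max_request_bytes(struct device *dev, size_t hw_limit)
{
        return min_t(size_t, hw_limit, dma_max_mapping_size(dev));
}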
631 return ops->sync_single_for_cpu || ops->sync_single_for_device; in dma_need_sync()
639 if (!ops || !ops->get_merge_boundary) in dma_get_merge_boundary()
642 return ops->get_merge_boundary(dev); in dma_get_merge_boundary()
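Finally, a hedged sketch of dma_need_sync() used to skip sync calls that would be no-ops, in the style of buffer-recycling code such as the page pool:

#include <linux/dma-mapping.h>

static void foo_maybe_sync_rx(struct device *dev, dma_addr_t buf_dma,
                              size_t len)
{
        if (dma_need_sync(dev, buf_dma))
                dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
}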