/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/dma-direction.h>
/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 *
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory to in a
 * way that gives better TLB efficiency (AKA it's not worth trying to build
 * the mapping out of larger pages).
 *
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 *
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully accessible
 * at elevated privilege levels (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
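/*
 * Worked expansion (not from this header): the n == 64 special case exists
 * because shifting a 64-bit value by 64 bits is undefined behavior in C,
 * so the all-ones mask cannot be produced by the shift expression.
 *
 *	DMA_BIT_MASK(64) == ~0ULL		== 0xffffffffffffffff
 *	DMA_BIT_MASK(32) == (1ULL << 32) - 1	== 0x00000000ffffffff
 *	DMA_BIT_MASK(24) == (1ULL << 24) - 1	== 0x0000000000ffffff
 */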
	return -ENOMEM;		/* in dma_mapping_error() */
	return -ENXIO;		/* in dma_get_sgtable_attrs() */
	return -ENXIO;		/* in dma_mmap_attrs() */
	return -EIO;		/* in dma_set_mask() */
	return -EIO;		/* in dma_set_coherent_mask() */
	/* in dma_map_single_attrs(): DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * synchronize the buffer back to the CPU domain before touching the buffer
 * by the CPU.
 *
 * Returns 0 on success or -EINVAL on error during mapping the buffer.
 */
	/* in dma_map_sgtable() */
	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
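/*
 * Illustrative sketch (not part of this header): a full map/unmap round
 * trip with the helper above. Assumes the caller has already populated
 * "sgt", e.g. with sg_alloc_table_from_pages(); names are made up.
 */
static inline int example_sgtable_io(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;	/* -EINVAL, per the kerneldoc above */

	/* ... point the device at sgt and wait for the transfer ... */

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}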
/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * call the ownership of the buffer is transferred back to the CPU domain.
 */
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);	/* in dma_unmap_sgtable() */
/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling the
 * dma_sync_sgtable_for_device() function.
 */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);	/* in dma_sync_sgtable_for_cpu() */
/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);	/* in dma_sync_sgtable_for_device() */
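/*
 * Illustrative sketch (not part of this header): the two sync helpers
 * above form the usual ownership handshake for a long-lived mapping.
 * Names and the DMA_BIDIRECTIONAL choice are made up for the example.
 */
static inline void example_sync_cycle(struct device *dev, struct sg_table *sgt)
{
	/* Transfer ownership from the device to the CPU: */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);

	/* ... the CPU may now safely read or modify the buffer ... */

	/* Hand ownership back to the device before the next transfer: */
	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
}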
	/* in dma_get_mask() */
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
	dev->dma_mask = &dev->coherent_dma_mask;	/* in dma_coerce_mask_and_coherent() */
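/*
 * Illustrative sketch (not part of this header): probe-time mask
 * negotiation. Because the coherent mask may always be set to the same
 * or a smaller value (see the comment above), one combined call per
 * width suffices. The 64-then-32-bit fallback is a common driver idiom,
 * not a requirement.
 */
static inline int example_probe_dma_masks(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit addressing available */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}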
/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.
 */
	/* in dma_addressing_limited() */
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
	/* in dma_get_max_seg_size() */
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
	/* in dma_set_max_seg_size() */
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
	/* in dma_get_seg_boundary() */
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev:	device to query the boundary for
 * @page_shift:	ilog() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
	/* in dma_set_seg_boundary() */
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
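/*
 * Illustrative sketch (not part of this header): advertising hardware
 * scatter-gather limits. Both setters fail with -EIO when the bus code
 * never allocated dev->dma_parms. The 64 KiB cap and 4 GiB boundary are
 * made-up example values.
 */
static inline int example_set_sg_limits(struct device *dev)
{
	int ret;

	ret = dma_set_max_seg_size(dev, SZ_64K);
	if (ret)
		return ret;
	/* No segment may cross a 4 GiB address boundary: */
	return dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
}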
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
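/*
 * Illustrative sketch (not part of this header): the accessors above pair
 * with DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() so that unmap
 * bookkeeping occupies space only on platforms that need it and compiles
 * away elsewhere. Struct and field names are made up.
 */
struct example_rx_desc {
	void				*cpu_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

static inline void example_save_rx(struct example_rx_desc *d,
				   dma_addr_t handle, size_t len)
{
	dma_unmap_addr_set(d, mapping, handle);
	dma_unmap_len_set(d, len, len);
}

static inline void example_complete_rx(struct device *dev,
				       struct example_rx_desc *d)
{
	dma_unmap_single(dev, dma_unmap_addr(d, mapping),
			 dma_unmap_len(d, len), DMA_FROM_DEVICE);
}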
/*
 * Legacy interface to set up the dma offset map. Drivers really should not
 * actually use it, but we have a few legacy cases left.
 */