/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

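/*
 * Out-of-line helpers, implemented in kernel/dma/direct.c, that back the
 * direct-mapping paths of the generic entry points in kernel/dma/mapping.c.
 */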
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
bool dma_direct_all_ram_mapped(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);

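/*
 * The scatterlist sync helper for the device only has work to do when the
 * architecture provides a cache maintenance hook or swiotlb bouncing may be
 * in use; otherwise it compiles away to an empty stub.
 */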
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

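/*
 * Likewise, unmapping a scatterlist may require a CPU sync and/or a swiotlb
 * copy-back, so these two helpers are only out of line in configurations
 * where that can happen.
 */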
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

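/*
 * Make a streaming mapping visible to the device: copy the CPU's data into
 * the swiotlb bounce buffer if this address was bounced, then perform any
 * architecture cache maintenance needed for non-coherent devices.
 */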
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

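/*
 * Make device-written data visible to the CPU: do the architecture cache
 * maintenance for non-coherent devices, copy back from the swiotlb bounce
 * buffer if this address was bounced, and let architectures that track page
 * state mark the pages clean after a DMA_FROM_DEVICE transfer.
 */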
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

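/*
 * Map a single page for streaming DMA.  The common case is a plain
 * phys_to_dma() translation; the buffer is bounced through swiotlb when
 * bouncing is forced for the device, when the device cannot address the
 * buffer, or when a small kmalloc() buffer might share a cacheline with
 * unrelated data.  P2PDMA pages can never be bounced, so those fail with
 * DMA_MAPPING_ERROR instead.
 */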
static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (is_swiotlb_force_bounce(dev)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		return swiotlb_map(dev, phys, size, dir, attrs);
	}

	if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
	    dma_kmalloc_needs_bounce(dev, size, dir)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

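/*
 * Tear down a mapping created by dma_direct_map_page(): sync the buffer for
 * CPU access unless the caller asked to skip it, then release the swiotlb
 * bounce buffer if one was used.  DMA_ATTR_SKIP_CPU_SYNC is added on the
 * swiotlb unmap so the data is not copied back a second time.
 */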
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */