#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

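/*
 * Map a single, physically contiguous lowmem buffer for streaming DMA.
 * virt_to_page() is used to find the backing page, so @ptr must not be
 * a vmalloc() or highmem address.  Returns a bus address for the device;
 * callers should check the result with dma_mapping_error().
 */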
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

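/*
 * Tear down a mapping created by dma_map_single_attrs().  @addr, @size
 * and @dir must match the values used when the buffer was mapped.
 */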
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

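/*
 * Map a scatter/gather list for streaming DMA.  Returns the number of
 * DMA segments actually used, which may be smaller than @nents if an
 * IOMMU merged adjacent entries, or 0 on failure.
 */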
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

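/*
 * Unmap a scatterlist mapped with dma_map_sg_attrs().  @nents must be
 * the number of entries originally passed to the map call, not the
 * (possibly smaller) count it returned.
 */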
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

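/*
 * Map @size bytes of @page, starting at @offset, for streaming DMA.
 * Unlike the *_attrs variants above there is no way to pass attributes,
 * so NULL is forwarded to the map_page hook.
 */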
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

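/*
 * Tear down a mapping created by dma_map_page().
 */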
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

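/*
 * Give ownership of a streaming mapping back to the CPU so it can
 * safely read data the device has written into the buffer.
 */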
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

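/*
 * Hand a streaming mapping back to the device after the CPU has
 * finished writing to the buffer.
 */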
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

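/*
 * Partial-buffer variant of dma_sync_single_for_cpu(): only @size
 * bytes starting @offset bytes into the mapping are synced.
 */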
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

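/*
 * Partial-buffer variant of dma_sync_single_for_device().
 */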
static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

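/*
 * Sync every entry of a mapped scatterlist for CPU access.
 */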
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

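/*
 * Hand every entry of a mapped scatterlist back to the device.
 */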
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

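/*
 * The plain entry points are thin wrappers around the *_attrs variants
 * with a NULL attribute pointer.  Typical streaming-DMA usage (a sketch;
 * "dev", "buf", "len" and the error handling are illustrative only):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device and wait for the transfer to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */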
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

#endif /* _ASM_GENERIC_DMA_MAPPING_H */