/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
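
/*
 * Illustration only (hypothetical platform code, not part of this file):
 * a board whose RAM is visible to devices at a fixed bus offset could
 * record that offset and select these ops at device-setup time roughly
 * as below, using the usual powerpc helpers; "pdev" and the
 * 0x80000000ull offset are made-up example values:
 *
 *	set_dma_offset(&pdev->dev, 0x80000000ull);
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *
 * get_dma_offset() below then folds that value into every bus address
 * handed back to drivers.
 */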


void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
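	/*
	 * Two paths: non-cache-coherent platforms go through the uncached
	 * consistent-memory helpers, coherent ones can hand out ordinary
	 * lowmem pages directly.
	 */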
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag  &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

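	/*
	 * No IOMMU here: the bus address is just the physical address plus
	 * the per-device offset.  __dma_sync_page() is a no-op on
	 * cache-coherent platforms.
	 */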
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

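	/*
	 * Build an all-ones mask just wide enough to cover the highest
	 * bus address the device may see (end of DRAM plus its offset).
	 */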
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
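	/*
	 * Flush or invalidate the CPU cache over the buffer as needed
	 * (a no-op on coherent hardware), then hand back phys + offset
	 * as the bus address.
	 */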
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
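	/*
	 * Translate the bus address back to a kernel virtual address and
	 * perform the cache maintenance for the given direction.
	 */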
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent			= dma_direct_alloc_coherent,
	.free_coherent			= dma_direct_free_coherent,
	.map_sg				= dma_direct_map_sg,
	.unmap_sg			= dma_direct_unmap_sg,
	.dma_supported			= dma_direct_dma_supported,
	.map_page			= dma_direct_map_page,
	.unmap_page			= dma_direct_unmap_page,
	.get_required_mask		= dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu		= dma_direct_sync_single,
	.sync_single_for_device		= dma_direct_sync_single,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

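	/*
	 * Precedence: a machine-specific hook first, then a handler
	 * provided by the device's dma_map_ops, and finally the generic
	 * dma_supported() check.
	 */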
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

u64 dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

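	/* Fall back to everything representable in a dma_addr_t. */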
	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

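/*
 * Register the dma-debug tracking pool at fs_initcall time, before most
 * drivers start mapping buffers.
 */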
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size)
{
	unsigned long pfn;

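	/*
	 * Buffers from the non-coherent pool must be mapped uncached into
	 * userspace and their pfn looked up through the pool's own helper;
	 * coherent buffers are ordinary lowmem pages.
	 */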
#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);