/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <asm/bug.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME: not sure if this is correct */
}

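/*
 * A minimal sketch of how platform code might apply a non-zero offset;
 * the fixup function name and the 0x30000000 value are hypothetical:
 *
 *	static void plat_dma_fixup(struct device *dev)
 *	{
 *		dev->archdata.dma_data = (void *)0x30000000UL;
 *	}
 *
 * With dma_data left at zero, bus addresses equal physical addresses.
 */
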
/*
 * MicroBlaze caches are not DMA-coherent, so coherent allocations are
 * routed through the uncached consistent_alloc()/consistent_free() pool.
 */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

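/*
 * Drivers reach the two hooks above through the generic DMA API. A
 * minimal usage sketch (buffer size and error handling are illustrative):
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...use buf from the CPU and bus from the device...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, bus);
 */
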
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this code path is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
							sg->length, direction);
	}

	return nents;
}

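/*
 * Sketch of typical scatter-gather usage from a driver (direction and
 * error handling depend on the transfer):
 *
 *	int mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -EIO;
 *	...program the device with sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */
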
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/*
	 * Nothing to undo: no per-mapping state is kept for streaming
	 * SG mappings, and the caches were synced at map time.
	 */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No separate cache cleanup step is needed: __dma_sync() expects
	 * a physical address, and dma_address already is one, so it is
	 * passed in directly without any phys_to_virt() conversion.
	 */
	__dma_sync(dma_address, size, direction);
}

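/*
 * Sketch of a single-page streaming mapping as a driver would issue it:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *	...hand "bus" to the device and wait for completion...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_TO_DEVICE);
 */
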
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * Only sync when the device may have written to the buffer
	 * (DMA_FROM_DEVICE); otherwise it is pointless to flush the
	 * cache as the memory segment is being handed to the CPU.
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * Only sync when the device is about to read the buffer
	 * (DMA_TO_DEVICE); it's pointless to touch the cache if the
	 * device isn't supposed to access the relevant region.
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

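/*
 * A driver that inspects a streaming buffer between transfers brackets
 * the CPU access with the sync calls; sketch for a device-to-memory
 * buffer:
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	...CPU reads the received data...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 */
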
static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this code path is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this code path is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

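/*
 * The scatterlist sync variants mirror the single-buffer calls; sketch:
 *
 *	dma_sync_sg_for_cpu(dev, sglist, nents, DMA_FROM_DEVICE);
 *	...CPU touches the buffers...
 *	dma_sync_sg_for_device(dev, sglist, nents, DMA_FROM_DEVICE);
 */
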
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
	.sync_single_for_cpu		= dma_direct_sync_single_for_cpu,
	.sync_single_for_device		= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu		= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device		= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

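/*
 * Conceptually, the generic dma_* helpers dispatch through this table;
 * the exact fallback logic lives in asm/dma-mapping.h:
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t bus = ops->map_page(dev, page, 0, size, dir, NULL);
 */
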
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);