// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals that are
 * connected to a DMA controller which has a DMAengine driver implementation.
 */
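
/*
 * Typical use from a peripheral driver is a single call during probe. The
 * following is only a sketch: adc_probe(), the "rx" channel name and the
 * surrounding driver are illustrative, not part of this API.
 *
 *	static int adc_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		int ret;
 *
 *		... allocate and configure indio_dev ...
 *
 *		ret = devm_iio_dmaengine_buffer_setup_ext(&pdev->dev, indio_dev,
 *							  "rx",
 *							  IIO_BUFFER_DIRECTION_IN);
 *		if (ret)
 *			return ret;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */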

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
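	/*
	 * result->residue is the number of bytes the DMA engine left
	 * untransferred; trim it from bytes_used so only valid data is
	 * reported to the core.
	 */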
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	struct scatterlist *sgl;
	struct dma_vec *vecs;
	size_t max_size;
	dma_cookie_t cookie;
	size_t len_total;
	unsigned int i;
	int nents;

	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
		dma_dir = DMA_DEV_TO_MEM;
	else
		dma_dir = DMA_MEM_TO_DEV;

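	/*
	 * Blocks attached through the DMABUF interface carry a DMA-mapped
	 * scatterlist; all other blocks are single contiguous allocations
	 * that can be submitted as one slave transfer.
	 */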
	if (block->sg_table) {
		sgl = block->sg_table->sgl;
		nents = sg_nents_for_len(sgl, block->bytes_used);
		if (nents < 0)
			return nents;

		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
		if (!vecs)
			return -ENOMEM;

		len_total = block->bytes_used;

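		/*
		 * Convert the scatterlist into a dma_vec array, clamping each
		 * entry to the bytes still outstanding so the final vector
		 * does not run past bytes_used.
		 */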
		for (i = 0; i < nents; i++) {
			vecs[i].addr = sg_dma_address(sgl);
			vecs[i].len = min(sg_dma_len(sgl), len_total);
			len_total -= vecs[i].len;

			sgl = sg_next(sgl);
		}

		desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
							 vecs, nents, dma_dir,
							 DMA_PREP_INTERRUPT);
		kfree(vecs);
	} else {
		max_size = min(block->size, dmaengine_buffer->max_size);
		max_size = round_down(max_size, dmaengine_buffer->align);

		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
			block->bytes_used = max_size;

		if (!block->bytes_used || block->bytes_used > max_size)
			return -EINVAL;

		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
						   block->phys_addr,
						   block->bytes_used,
						   dma_dir,
						   DMA_PREP_INTERRUPT);
	}
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.write = iio_dma_buffer_write,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_usage,
	.space_available = iio_dma_buffer_usage,
	.release = iio_dmaengine_buffer_release,

	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,

	.lock_queue = iio_dma_buffer_lock_queue,
	.unlock_queue = iio_dma_buffer_unlock_queue,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes,
	NULL,
};
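
/*
 * Userspace can read the alignment via sysfs; the path is typically
 * /sys/bus/iio/devices/iio:deviceX/bufferY/length_align_bytes (the exact
 * buffer directory name depends on the kernel's IIO sysfs layout).
 */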

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @chan: DMA channel.
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
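 *
 * Return: Pointer to the new IIO buffer on success, or an ERR_PTR() on
 * failure.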
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		return ERR_PTR(ret);

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	/*
	 * Transfers need to be aligned to the maximum of the minimum
	 * supported widths: src/dst_addr_widths are bitmasks of the
	 * supported bus widths in bytes, so __ffs() yields the smallest
	 * supported width per direction, e.g. BIT(2) | BIT(4) gives 2.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	iio_buffer_put(buffer);
}

/**
 * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
 * @buffer: Buffer to free
 *
 * Releases the DMA channel and frees the buffer previously set up with
 * iio_dmaengine_buffer_setup_ext().
 */
void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);
	struct dma_chan *chan = dmaengine_buffer->chan;

	iio_dmaengine_buffer_free(buffer);
	dma_release_channel(chan);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");

static struct iio_buffer
*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
				  struct dma_chan *chan,
				  enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(chan);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	buffer->direction = dir;

	ret = iio_device_attach_buffer(indio_dev, buffer);
	if (ret) {
		iio_dmaengine_buffer_free(buffer);
		return ERR_PTR(ret);
	}

	return buffer;
}

/**
 * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: DMA channel consumer device
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 *
 * Once done using the buffer, iio_dmaengine_buffer_teardown() should be used
 * to release it.
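 *
 * Return: Pointer to the attached IIO buffer on success, or an ERR_PTR() on
 * failure.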
 */
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
						  struct iio_dev *indio_dev,
						  const char *channel,
						  enum iio_buffer_direction dir)
{
	struct dma_chan *chan;
	struct iio_buffer *buffer;

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan))
		return ERR_CAST(chan);

	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
	if (IS_ERR(buffer))
		dma_release_channel(chan);

	return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
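
/*
 * A non-devm caller pairs iio_dmaengine_buffer_setup_ext() with
 * iio_dmaengine_buffer_teardown(). A minimal sketch (the "rx" channel name
 * is illustrative):
 *
 *	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
 *						IIO_BUFFER_DIRECTION_IN);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	...
 *	iio_dmaengine_buffer_teardown(buffer);
 */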

static void devm_iio_dmaengine_buffer_teardown(void *buffer)
{
	iio_dmaengine_buffer_teardown(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Device for devm ownership and DMA channel consumer device
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
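 *
 * The buffer and DMA channel are torn down automatically when @dev is
 * unbound.
 *
 * Return: 0 on success, a negative error code otherwise.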
 */
int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
					struct iio_dev *indio_dev,
					const char *channel,
					enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;

	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
					buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");

static void devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_with_handle() - Setup a DMA buffer for an
 *						   IIO device
 * @dev: Device for devm ownership
 * @indio_dev: IIO device to which to attach this buffer.
 * @chan: DMA channel
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 *
 * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
 * caller manages requesting and releasing the DMA channel handle.
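 *
 * Return: 0 on success, a negative error code otherwise.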
 */
int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
						struct iio_dev *indio_dev,
						struct dma_chan *chan,
						enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;

	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_free,
					buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_handle,
		     "IIO_DMAENGINE_BUFFER");

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IIO_DMA_BUFFER");