// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>
#include <linux/string_choices.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-hdma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
        return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
        return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct dw_edma_desc, vd);
}

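/*
 * Translate a local CPU physical address into the address the remote PCIe
 * side should use, delegating to the controller glue driver when it
 * provides a pci_address() callback; otherwise the CPU address is assumed
 * to be valid on the PCI bus as-is.
 */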
static inline
u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
{
        struct dw_edma_chip *chip = chan->dw->chip;

        if (chip->ops->pci_address)
                return chip->ops->pci_address(chip->dev, cpu_addr);

        return cpu_addr;
}

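/*
 * Allocate one burst (a single linked-list element) and append it to the
 * chunk's burst list; the first allocation becomes the list head and
 * leaves bursts_alloc at zero.
 */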
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
        struct dw_edma_burst *burst;

        burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
        if (unlikely(!burst))
                return NULL;

        INIT_LIST_HEAD(&burst->list);
        if (chunk->burst) {
                /* Create and add new element into the linked list */
                chunk->bursts_alloc++;
                list_add_tail(&burst->list, &chunk->burst->list);
        } else {
                /* List head */
                chunk->bursts_alloc = 0;
                chunk->burst = burst;
        }

        return burst;
}

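/*
 * Allocate a chunk, i.e. one hardware linked list worth of bursts, bind it
 * to the channel's linked-list memory region and append it to the
 * descriptor's chunk list (the first allocation becomes the list head).
 */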
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
        struct dw_edma_chip *chip = desc->chan->dw->chip;
        struct dw_edma_chan *chan = desc->chan;
        struct dw_edma_chunk *chunk;

        chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
        if (unlikely(!chunk))
                return NULL;

        INIT_LIST_HEAD(&chunk->list);
        chunk->chan = chan;
        /* Toggle the change bit (CB) on each chunk; this is the mechanism
         * used to inform the eDMA HW block that a new linked list is ready
         * to be consumed.
         *  - Odd chunks originate CB equal to 0
         *  - Even chunks originate CB equal to 1
         */
        chunk->cb = !(desc->chunks_alloc % 2);
        if (chan->dir == EDMA_DIR_WRITE) {
                chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
                chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
        } else {
                chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
                chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
        }

        if (desc->chunk) {
                /* Create and add new element into the linked list */
                if (!dw_edma_alloc_burst(chunk)) {
                        kfree(chunk);
                        return NULL;
                }
                desc->chunks_alloc++;
                list_add_tail(&chunk->list, &desc->chunk->list);
        } else {
                /* List head */
                chunk->burst = NULL;
                desc->chunks_alloc = 0;
                desc->chunk = chunk;
        }

        return chunk;
}

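/*
 * Allocate a transfer descriptor together with its first (head) chunk;
 * further chunks are added on demand while the transfer is being built.
 */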
static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
        struct dw_edma_desc *desc;

        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
        if (unlikely(!desc))
                return NULL;

        desc->chan = chan;
        if (!dw_edma_alloc_chunk(desc)) {
                kfree(desc);
                return NULL;
        }

        return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
        struct dw_edma_burst *child, *_next;

        /* Remove all the list elements */
        list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
                list_del(&child->list);
                kfree(child);
                chunk->bursts_alloc--;
        }

        /* Remove the list head */
        kfree(child);
        chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
        struct dw_edma_chunk *child, *_next;

        if (!desc->chunk)
                return;

        /* Remove all the list elements */
        list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
                dw_edma_free_burst(child);
                list_del(&child->list);
                kfree(child);
                desc->chunks_alloc--;
        }

        /* Remove the list head */
        kfree(child);
        desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
        dw_edma_free_chunk(desc);
        kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
        dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

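/*
 * Pop the next chunk of the currently issued descriptor and hand it to the
 * hardware, then release the chunk's bookkeeping. Returns 1 if a chunk was
 * started, 0 if there is nothing left to do.
 */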
static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->dw;
        struct dw_edma_chunk *child;
        struct dw_edma_desc *desc;
        struct virt_dma_desc *vd;

        vd = vchan_next_desc(&chan->vc);
        if (!vd)
                return 0;

        desc = vd2dw_edma_desc(vd);
        if (!desc)
                return 0;

        child = list_first_entry_or_null(&desc->chunk->list,
                                         struct dw_edma_chunk, list);
        if (!child)
                return 0;

        dw_edma_core_start(dw, child, !desc->xfer_sz);
        desc->xfer_sz += child->ll_region.sz;
        dw_edma_free_burst(child);
        list_del(&child->list);
        kfree(child);
        desc->chunks_alloc--;

        return 1;
}

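/*
 * Report the supported transfer direction for this channel. The mapping
 * depends on where the eDMA block sits: when it is controlled locally
 * (DW_EDMA_CHIP_LOCAL) a read channel serves DEV_TO_MEM, otherwise the
 * roles are inverted (see the diagram in dw_edma_device_transfer()).
 */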
static void dw_edma_device_caps(struct dma_chan *dchan,
                                struct dma_slave_caps *caps)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

        if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
                if (chan->dir == EDMA_DIR_READ)
                        caps->directions = BIT(DMA_DEV_TO_MEM);
                else
                        caps->directions = BIT(DMA_MEM_TO_DEV);
        } else {
                if (chan->dir == EDMA_DIR_WRITE)
                        caps->directions = BIT(DMA_DEV_TO_MEM);
                else
                        caps->directions = BIT(DMA_MEM_TO_DEV);
        }
}

static int dw_edma_device_config(struct dma_chan *dchan,
                                 struct dma_slave_config *config)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

        memcpy(&chan->config, config, sizeof(*config));
        chan->configured = true;

        return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
        int err = 0;

        if (!chan->configured)
                err = -EPERM;
        else if (chan->status != EDMA_ST_BUSY)
                err = -EPERM;
        else if (chan->request != EDMA_REQ_NONE)
                err = -EPERM;
        else
                chan->request = EDMA_REQ_PAUSE;

        return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
        int err = 0;

        if (!chan->configured) {
                err = -EPERM;
        } else if (chan->status != EDMA_ST_PAUSE) {
                err = -EPERM;
        } else if (chan->request != EDMA_REQ_NONE) {
                err = -EPERM;
        } else {
                chan->status = EDMA_ST_BUSY;
                dw_edma_start_transfer(chan);
        }

        return err;
}

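/*
 * Terminate the channel: idle or paused channels are simply deconfigured,
 * a channel that only looks busy (the hardware already reports completion)
 * is reset to idle, and a genuinely running channel gets a deferred STOP
 * request that is honoured from the completion interrupt.
 */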
static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
        int err = 0;

        if (!chan->configured) {
                /* Do nothing */
        } else if (chan->status == EDMA_ST_PAUSE) {
                chan->status = EDMA_ST_IDLE;
                chan->configured = false;
        } else if (chan->status == EDMA_ST_IDLE) {
                chan->configured = false;
        } else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) {
                /*
                 * The channel is in a false BUSY state; it probably either
                 * never received an interrupt or lost one
                 */
                chan->status = EDMA_ST_IDLE;
                chan->configured = false;
        } else if (chan->request > EDMA_REQ_PAUSE) {
                err = -EPERM;
        } else {
                chan->request = EDMA_REQ_STOP;
        }

        return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
        unsigned long flags;

        if (!chan->configured)
                return;

        spin_lock_irqsave(&chan->vc.lock, flags);
        if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
            chan->status == EDMA_ST_IDLE) {
                chan->status = EDMA_ST_BUSY;
                dw_edma_start_transfer(chan);
        }
        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                         struct dma_tx_state *txstate)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
        struct dw_edma_desc *desc;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        u32 residue = 0;

        ret = dma_cookie_status(dchan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
                ret = DMA_PAUSED;

        if (!txstate)
                goto ret_residue;

        spin_lock_irqsave(&chan->vc.lock, flags);
        vd = vchan_find_desc(&chan->vc, cookie);
        if (vd) {
                desc = vd2dw_edma_desc(vd);
                if (desc)
                        residue = desc->alloc_sz - desc->xfer_sz;
        }
        spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
        dma_set_residue(txstate, residue);

        return ret;
}

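/*
 * Common descriptor preparation path shared by the slave_sg, cyclic and
 * interleaved prep callbacks: validate the requested direction against the
 * channel role, then split the transfer into chunks of at most ll_max
 * bursts, one burst per segment/period/frame element.
 */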
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
        enum dma_transfer_direction dir = xfer->direction;
        struct scatterlist *sg = NULL;
        struct dw_edma_chunk *chunk;
        struct dw_edma_burst *burst;
        struct dw_edma_desc *desc;
        u64 src_addr, dst_addr;
        size_t fsz = 0;
        u32 cnt = 0;
        int i;

        if (!chan->configured)
                return NULL;

        /*
         * Local Root Port/End-point              Remote End-point
         * +-----------------------+ PCIe bus +----------------------+
         * |                       |    +-+   |                      |
         * |    DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch   DEV_TO_MEM   |
         * |                       |    | |   |                      |
         * |    MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch   MEM_TO_DEV   |
         * |                       |    +-+   |                      |
         * +-----------------------+          +----------------------+
         *
         * 1. Normal logic:
         * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
         * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
         * for the device read operations (DEV_TO_MEM) and the Tx channel
         * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
         *
         * 2. Inverted logic:
         * If eDMA is embedded into a Remote PCIe EP and is controlled by the
         * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
         * channel (EDMA_DIR_WRITE) will be used for the device read operations
         * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
         * operations (MEM_TO_DEV).
         *
         * It is the client driver responsibility to choose a proper channel
         * for the DMA transfers.
         */
        if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
                if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
                    (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
                        return NULL;
        } else {
                if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
                    (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
                        return NULL;
        }

        if (xfer->type == EDMA_XFER_CYCLIC) {
                if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
                        return NULL;
        } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                if (xfer->xfer.sg.len < 1)
                        return NULL;
        } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
                if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
                        return NULL;
                if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
                        return NULL;
        } else {
                return NULL;
        }

        desc = dw_edma_alloc_desc(chan);
        if (unlikely(!desc))
                goto err_alloc;

        chunk = dw_edma_alloc_chunk(desc);
        if (unlikely(!chunk))
                goto err_alloc;

        if (xfer->type == EDMA_XFER_INTERLEAVED) {
                src_addr = xfer->xfer.il->src_start;
                dst_addr = xfer->xfer.il->dst_start;
        } else {
                src_addr = chan->config.src_addr;
                dst_addr = chan->config.dst_addr;
        }

        if (dir == DMA_DEV_TO_MEM)
                src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
        else
                dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);

        if (xfer->type == EDMA_XFER_CYCLIC) {
                cnt = xfer->xfer.cyclic.cnt;
        } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                cnt = xfer->xfer.sg.len;
                sg = xfer->xfer.sg.sgl;
        } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
                cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
                fsz = xfer->xfer.il->frame_size;
        }

        for (i = 0; i < cnt; i++) {
                if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
                        break;

                if (chunk->bursts_alloc == chan->ll_max) {
                        chunk = dw_edma_alloc_chunk(desc);
                        if (unlikely(!chunk))
                                goto err_alloc;
                }

                burst = dw_edma_alloc_burst(chunk);
                if (unlikely(!burst))
                        goto err_alloc;

                if (xfer->type == EDMA_XFER_CYCLIC)
                        burst->sz = xfer->xfer.cyclic.len;
                else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
                        burst->sz = sg_dma_len(sg);
                else if (xfer->type == EDMA_XFER_INTERLEAVED)
                        burst->sz = xfer->xfer.il->sgl[i % fsz].size;

                chunk->ll_region.sz += burst->sz;
                desc->alloc_sz += burst->sz;

                if (dir == DMA_DEV_TO_MEM) {
                        burst->sar = src_addr;
                        if (xfer->type == EDMA_XFER_CYCLIC) {
                                burst->dar = xfer->xfer.cyclic.paddr;
                        } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                                src_addr += sg_dma_len(sg);
                                burst->dar = sg_dma_address(sg);
                                /* Unlike the typical assumption made by other
                                 * drivers/IPs, the peripheral memory here is
                                 * not a FIFO but linear memory, which is why
                                 * the source and destination addresses are
                                 * advanced by the same amount (the data
                                 * length)
                                 */
                        } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
                                burst->dar = dst_addr;
                        }
                } else {
                        burst->dar = dst_addr;
                        if (xfer->type == EDMA_XFER_CYCLIC) {
                                burst->sar = xfer->xfer.cyclic.paddr;
                        } else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                                dst_addr += sg_dma_len(sg);
                                burst->sar = sg_dma_address(sg);
                                /* Unlike the typical assumption made by other
                                 * drivers/IPs, the peripheral memory here is
                                 * not a FIFO but linear memory, which is why
                                 * the source and destination addresses are
                                 * advanced by the same amount (the data
                                 * length)
                                 */
                        } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
                                burst->sar = src_addr;
                        }
                }

                if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
                        sg = sg_next(sg);
                } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
                        struct dma_interleaved_template *il = xfer->xfer.il;
                        struct data_chunk *dc = &il->sgl[i % fsz];

                        src_addr += burst->sz;
                        if (il->src_sgl)
                                src_addr += dmaengine_get_src_icg(il, dc);

                        dst_addr += burst->sz;
                        if (il->dst_sgl)
                                dst_addr += dmaengine_get_dst_icg(il, dc);
                }
        }

        return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
        if (desc)
                dw_edma_free_desc(desc);

        return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
                             unsigned int len,
                             enum dma_transfer_direction direction,
                             unsigned long flags, void *context)
{
        struct dw_edma_transfer xfer;

        xfer.dchan = dchan;
        xfer.direction = direction;
        xfer.xfer.sg.sgl = sgl;
        xfer.xfer.sg.len = len;
        xfer.flags = flags;
        xfer.type = EDMA_XFER_SCATTER_GATHER;

        return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
                               size_t len, size_t count,
                               enum dma_transfer_direction direction,
                               unsigned long flags)
{
        struct dw_edma_transfer xfer;

        xfer.dchan = dchan;
        xfer.direction = direction;
        xfer.xfer.cyclic.paddr = paddr;
        xfer.xfer.cyclic.len = len;
        xfer.xfer.cyclic.cnt = count;
        xfer.flags = flags;
        xfer.type = EDMA_XFER_CYCLIC;

        return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
                                    struct dma_interleaved_template *ilt,
                                    unsigned long flags)
{
        struct dw_edma_transfer xfer;

        xfer.dchan = dchan;
        xfer.direction = ilt->dir;
        xfer.xfer.il = ilt;
        xfer.flags = flags;
        xfer.type = EDMA_XFER_INTERLEAVED;

        return dw_edma_device_transfer(&xfer);
}

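/*
 * Completion interrupt handler for a channel: complete the descriptor once
 * all of its chunks have been transferred, or honour a pending STOP/PAUSE
 * request, and start the next chunk/descriptor if there is more work.
 */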
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
        struct dw_edma_desc *desc;
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        vd = vchan_next_desc(&chan->vc);
        if (vd) {
                switch (chan->request) {
                case EDMA_REQ_NONE:
                        desc = vd2dw_edma_desc(vd);
                        if (!desc->chunks_alloc) {
                                list_del(&vd->node);
                                vchan_cookie_complete(vd);
                        }

                        /*
                         * Continue transferring if there are remaining
                         * chunks or issued requests.
                         */
                        chan->status = dw_edma_start_transfer(chan) ?
                                       EDMA_ST_BUSY : EDMA_ST_IDLE;
                        break;

                case EDMA_REQ_STOP:
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                        chan->request = EDMA_REQ_NONE;
                        chan->status = EDMA_ST_IDLE;
                        break;

                case EDMA_REQ_PAUSE:
                        chan->request = EDMA_REQ_NONE;
                        chan->status = EDMA_ST_PAUSE;
                        break;

                default:
                        break;
                }
        }
        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        vd = vchan_next_desc(&chan->vc);
        if (vd) {
                list_del(&vd->node);
                vchan_cookie_complete(vd);
        }
        spin_unlock_irqrestore(&chan->vc.lock, flags);
        chan->request = EDMA_REQ_NONE;
        chan->status = EDMA_ST_IDLE;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
        struct dw_edma_irq *dw_irq = data;

        return dw_edma_core_handle_int(dw_irq, EDMA_DIR_WRITE,
                                       dw_edma_done_interrupt,
                                       dw_edma_abort_interrupt);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
        struct dw_edma_irq *dw_irq = data;

        return dw_edma_core_handle_int(dw_irq, EDMA_DIR_READ,
                                       dw_edma_done_interrupt,
                                       dw_edma_abort_interrupt);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
        irqreturn_t ret = IRQ_NONE;

        ret |= dw_edma_interrupt_write(irq, data);
        ret |= dw_edma_interrupt_read(irq, data);

        return ret;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

        if (chan->status != EDMA_ST_IDLE)
                return -EBUSY;

        return 0;
}

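/*
 * Free channel resources: keep retrying dw_edma_device_terminate_all() for
 * up to 5 seconds so an in-flight transfer can drain before the channel is
 * finally released.
 */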
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(5000);
        int ret;

        while (time_before(jiffies, timeout)) {
                ret = dw_edma_device_terminate_all(dchan);
                if (!ret)
                        break;

                if (time_after_eq(jiffies, timeout))
                        return;

                cpu_relax();
        }
}

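/*
 * Initialize every write and read channel (direction, linked-list capacity,
 * IRQ/MSI binding, virt-dma bookkeeping), then fill in the dma_device
 * capabilities and callbacks and register it with the DMA engine core.
 */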
static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
        struct dw_edma_chip *chip = dw->chip;
        struct device *dev = chip->dev;
        struct dw_edma_chan *chan;
        struct dw_edma_irq *irq;
        struct dma_device *dma;
        u32 i, ch_cnt;
        u32 pos;

        ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
        dma = &dw->dma;

        INIT_LIST_HEAD(&dma->channels);

        for (i = 0; i < ch_cnt; i++) {
                chan = &dw->chan[i];

                chan->dw = dw;

                if (i < dw->wr_ch_cnt) {
                        chan->id = i;
                        chan->dir = EDMA_DIR_WRITE;
                } else {
                        chan->id = i - dw->wr_ch_cnt;
                        chan->dir = EDMA_DIR_READ;
                }

                chan->configured = false;
                chan->request = EDMA_REQ_NONE;
                chan->status = EDMA_ST_IDLE;

                if (chan->dir == EDMA_DIR_WRITE)
                        chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
                else
                        chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
                chan->ll_max -= 1;

                dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
                         str_write_read(chan->dir == EDMA_DIR_WRITE),
                         chan->id, chan->ll_max);

                if (dw->nr_irqs == 1)
                        pos = 0;
                else if (chan->dir == EDMA_DIR_WRITE)
                        pos = chan->id % wr_alloc;
                else
                        pos = wr_alloc + chan->id % rd_alloc;

                irq = &dw->irq[pos];

                if (chan->dir == EDMA_DIR_WRITE)
                        irq->wr_mask |= BIT(chan->id);
                else
                        irq->rd_mask |= BIT(chan->id);

                irq->dw = dw;
                memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

                dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
                         str_write_read(chan->dir == EDMA_DIR_WRITE),
                         chan->id,
                         chan->msi.address_hi, chan->msi.address_lo,
                         chan->msi.data);

                chan->vc.desc_free = vchan_free_desc;
                chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
                                        &dw->chip->dt_region_wr[chan->id] :
                                        &dw->chip->dt_region_rd[chan->id];

                vchan_init(&chan->vc, dma);

                dw_edma_core_ch_config(chan);
        }

        /* Set DMA channel capabilities */
        dma_cap_zero(dma->cap_mask);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma->cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->cap_mask);
        dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
        dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        /* Set DMA channel callbacks */
        dma->dev = chip->dev;
        dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
        dma->device_free_chan_resources = dw_edma_free_chan_resources;
        dma->device_caps = dw_edma_device_caps;
        dma->device_config = dw_edma_device_config;
        dma->device_pause = dw_edma_device_pause;
        dma->device_resume = dw_edma_device_resume;
        dma->device_terminate_all = dw_edma_device_terminate_all;
        dma->device_issue_pending = dw_edma_device_issue_pending;
        dma->device_tx_status = dw_edma_device_tx_status;
        dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
        dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
        dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

        dma_set_max_seg_size(dma->dev, U32_MAX);

        /* Register DMA device */
        return dma_async_device_register(dma);
}

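/*
 * Helpers for spreading the available IRQ vectors between the write and
 * read channel groups: dw_edma_dec_irq_alloc() hands one more vector to a
 * group while vectors remain, dw_edma_add_irq_mask() then works out how
 * many channels have to share each of those vectors.
 */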
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
        if (*nr_irqs && *alloc < cnt) {
                (*alloc)++;
                (*nr_irqs)--;
        }
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
        while (*mask * alloc < cnt)
                (*mask)++;
}

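/*
 * Request the IRQ vectors provided by the glue driver: a single vector is
 * shared by all channels, otherwise the vectors are split between write
 * and read channels and the cached MSI message of each vector is stored
 * for the channel setup step.
 */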
static int dw_edma_irq_request(struct dw_edma *dw,
                               u32 *wr_alloc, u32 *rd_alloc)
{
        struct dw_edma_chip *chip = dw->chip;
        struct device *dev = dw->chip->dev;
        u32 wr_mask = 1;
        u32 rd_mask = 1;
        int i, err = 0;
        u32 ch_cnt;
        int irq;

        ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

        if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
                return -EINVAL;

        dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
        if (!dw->irq)
                return -ENOMEM;

        if (chip->nr_irqs == 1) {
                /* Common IRQ shared among all channels */
                irq = chip->ops->irq_vector(dev, 0);
                err = request_irq(irq, dw_edma_interrupt_common,
                                  IRQF_SHARED, dw->name, &dw->irq[0]);
                if (err) {
                        dw->nr_irqs = 0;
                        return err;
                }

                if (irq_get_msi_desc(irq))
                        get_cached_msi_msg(irq, &dw->irq[0].msi);

                dw->nr_irqs = 1;
        } else {
                /* Distribute IRQs equally among all channels */
                int tmp = chip->nr_irqs;

                while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
                        dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
                        dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
                }

                dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
                dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

                for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
                        irq = chip->ops->irq_vector(dev, i);
                        err = request_irq(irq,
                                          i < *wr_alloc ?
                                                dw_edma_interrupt_write :
                                                dw_edma_interrupt_read,
                                          IRQF_SHARED, dw->name,
                                          &dw->irq[i]);
                        if (err)
                                goto err_irq_free;

                        if (irq_get_msi_desc(irq))
                                get_cached_msi_msg(irq, &dw->irq[i].msi);
                }

                dw->nr_irqs = i;
        }

        return 0;

err_irq_free:
        for (i--; i >= 0; i--) {
                irq = chip->ops->irq_vector(dev, i);
                free_irq(irq, &dw->irq[i]);
        }

        return err;
}

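/*
 * Probe entry point used by the controller glue drivers: register the eDMA
 * or HDMA register ops, size the write and read channel sets, request the
 * IRQ vectors and register the resulting dma_device.
 *
 * A minimal sketch of how a dmaengine client would then drive one of these
 * channels (the filter function name is purely illustrative, not part of
 * this driver):
 *
 *	chan = dma_request_channel(mask, my_filter_fn, NULL);
 *	dmaengine_slave_config(chan, &cfg);	// src/dst address on the PCI side
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM, flags);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */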
int dw_edma_probe(struct dw_edma_chip *chip)
{
        struct device *dev;
        struct dw_edma *dw;
        u32 wr_alloc = 0;
        u32 rd_alloc = 0;
        int i, err;

        if (!chip)
                return -EINVAL;

        dev = chip->dev;
        if (!dev || !chip->ops)
                return -EINVAL;

        dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
        if (!dw)
                return -ENOMEM;

        dw->chip = chip;

        if (dw->chip->mf == EDMA_MF_HDMA_NATIVE)
                dw_hdma_v0_core_register(dw);
        else
                dw_edma_v0_core_register(dw);

        raw_spin_lock_init(&dw->lock);

        dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
                              dw_edma_core_ch_count(dw, EDMA_DIR_WRITE));
        dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

        dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
                              dw_edma_core_ch_count(dw, EDMA_DIR_READ));
        dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

        if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
                return -EINVAL;

        dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
                 dw->wr_ch_cnt, dw->rd_ch_cnt);

        /* Allocate channels */
        dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
                                sizeof(*dw->chan), GFP_KERNEL);
        if (!dw->chan)
                return -ENOMEM;

        snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
                 dev_name(chip->dev));

        /* Disable eDMA, only to establish the ideal initial conditions */
        dw_edma_core_off(dw);

        /* Request IRQs */
        err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
        if (err)
                return err;

        /* Setup write/read channels */
        err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
        if (err)
                goto err_irq_free;

        /* Turn debugfs on */
        dw_edma_core_debugfs_on(dw);

        chip->dw = dw;

        return 0;

err_irq_free:
        for (i = (dw->nr_irqs - 1); i >= 0; i--)
                free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

        return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

int dw_edma_remove(struct dw_edma_chip *chip)
{
        struct dw_edma_chan *chan, *_chan;
        struct device *dev = chip->dev;
        struct dw_edma *dw = chip->dw;
        int i;

        /* Skip removal if no private data found */
        if (!dw)
                return -ENODEV;

        /* Disable eDMA */
        dw_edma_core_off(dw);

        /* Free irqs */
        for (i = (dw->nr_irqs - 1); i >= 0; i--)
                free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

        /* Deregister eDMA device */
        dma_async_device_unregister(&dw->dma);
        list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
                                 vc.chan.device_node) {
                tasklet_kill(&chan->vc.task);
                list_del(&chan->vc.chan.device_node);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");