/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>

#define SIRFSOC_DMA_DESCRIPTORS                 16
#define SIRFSOC_DMA_CHANNELS                    16

#define SIRFSOC_DMA_CH_ADDR                     0x00
#define SIRFSOC_DMA_CH_XLEN                     0x04
#define SIRFSOC_DMA_CH_YLEN                     0x08
#define SIRFSOC_DMA_CH_CTRL                     0x0C

#define SIRFSOC_DMA_WIDTH_0                     0x100
#define SIRFSOC_DMA_CH_VALID                    0x140
#define SIRFSOC_DMA_CH_INT                      0x144
#define SIRFSOC_DMA_INT_EN                      0x148
#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150

#define SIRFSOC_DMA_MODE_CTRL_BIT               4
#define SIRFSOC_DMA_DIR_CTRL_BIT                5
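
/*
 * Layout of the per-channel control word written by sirfsoc_dma_execute():
 * bit SIRFSOC_DMA_MODE_CTRL_BIT carries the mode selected through
 * dma_slave_config() (set when src_maxburst is 4) and
 * bit SIRFSOC_DMA_DIR_CTRL_BIT carries the transfer direction
 * (1 = memory to device, 0 = device to memory).
 */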

/* The xlen and dma_width registers are programmed in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN			4
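/* e.g. a 64-byte DMA line is programmed as xlen = 64 / SIRFSOC_DMA_WORD_LEN = 16 */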

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int             xlen;           /* DMA xlen */
	int             ylen;           /* DMA ylen */
	int             width;          /* DMA width */
	int             dir;
	bool            cyclic;         /* is loop DMA? */
	u32             addr;		/* DMA buffer address */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	dma_cookie_t			completed_cookie;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * The channel lock is already held by the callers of this function,
	 * so it is not taken again here.
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to the active list */
	list_move_tail(&sdesc->node, &schan->active);

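	/*
	 * Each channel has a 0x10-byte bank of ADDR/XLEN/YLEN/CTRL registers;
	 * the per-channel width words sit in a separate table starting at
	 * SIRFSOC_DMA_WIDTH_0 with a stride of 4 bytes.
	 */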
	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting the DMA transfer
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
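		/*
		 * Loop-control register: bit cid enables loop mode for this
		 * channel; bit (cid + 16) presumably enables the mid-buffer
		 * (BUFA/BUFB) interrupt, since sirfsoc_dma_terminate_all()
		 * clears both bits together.
		 */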
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
			node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for a cyclic channel, the desc always stays in the active list */
			sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
				node);

			if (!sdesc || !sdesc->cyclic) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	/* Update cookie */
	cookie = schan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;

	schan->chan.cookie = cookie;
	sdesc->desc.cookie = cookie;

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
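	/*
	 * Only a 4-word maxburst selects mode 1 (presumably the controller's
	 * burst mode); any other burst size leaves the channel in mode 0.
	 */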
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
		~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
		& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	spin_lock_irqsave(&schan->lock, flags);
	last_used = schan->chan.cookie;
	last_complete = schan->completed_cookie;
	spin_unlock_irqrestore(&schan->lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2,
	 * and ylen (the number of frames minus 1) must be at least 0
	 */
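	/*
	 * Worked example: 16 frames of 64 bytes with a 192-byte gap between
	 * frames gives xlen = 16 words, width = (64 + 192) / 4 = 64 words
	 * and ylen = 15.
	 */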
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with exactly 2 periods.
	 * If the X-length is set to 0, the channel runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
	 * then wraps back to the beginning of that area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB, and the DMA controller generates two interrupts per
	 * loop: one when the DMA address reaches the end of BUFA and one
	 * when it reaches the end of BUFB.
	 */
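	/*
	 * Worked example: a 4096-byte buffer split into two 2048-byte
	 * periods is programmed as xlen = 0, width = 1 and
	 * ylen = 4096 / SIRFSOC_DMA_WORD_LEN - 1 = 1023, so an interrupt
	 * fires at the 2048-byte boundary and again at the wrap.
	 */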
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

/*
 * The DMA controller consists of 16 independent DMA channels, each
 * allocated to a different function. Clients pick a specific channel
 * with the filter below (see the usage sketch after it).
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
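
/*
 * Usage sketch (illustrative only, not part of this driver): a slave driver
 * on a SiRF SoC would typically request one of these channels through the
 * generic dmaengine API, roughly like:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *				   (void *)ch_nr);
 *
 * where ch_nr is dmac_index * SIRFSOC_DMA_CHANNELS + channel_id, matching
 * the check in sirfsoc_dma_filter_id() above.
 */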

static int __devinit sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		ret = -ENODEV;
		goto free_mem;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		ret = -EINVAL;
		goto free_mem;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto free_mem;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
		sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto unmap_mem;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		schan->chan.cookie = 1;
		schan->completed_cookie = schan->chan.cookie;

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	devm_free_irq(dev, sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
unmap_mem:
	iounmap(sdma->base);
free_mem:
	devm_kfree(dev, sdma);
	return ret;
}

static int __devexit sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	devm_free_irq(dev, sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	iounmap(sdma->base);
	devm_kfree(dev, sdma);
	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= __devexit_p(sirfsoc_dma_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");