/*
 *  linux/arch/arm/plat-mxc/dma-v1.c
 *
 *  i.MX DMA registration and IRQ dispatching
 *
 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/dma-v1.h>
#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt Status Register */
#define DMA_DIMR    0x08		/* Interrupt Mask Register */
#define DMA_DBTOSR  0x0c		/* Burst Timeout Status Register */
#define DMA_DRTOSR  0x10		/* Request Timeout Status Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer Overflow Status Register */
#define DMA_DBTOCR  0x1c		/* Burst Timeout Control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request Source Select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst Length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request Timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers (share offset 0x98 with RTOR) */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel Counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

/*
 * struct imx_dma_channel - i.MX specific DMA extension
 * @name: name specified by DMA client
 * @irq_handler: client callback for end of transfer
 * @err_handler: client callback for error condition
 * @data: client's context data for callbacks
 * @dma_mode: direction of the transfer, %DMA_MODE_READ or %DMA_MODE_WRITE
 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
 * @resbytes: total residual number of bytes to transfer
 *            (it can be lower than or equal to the sum of SG mapped chunk sizes)
 *
 * This structure is used for i.MX DMA processing. It would probably be
 * good to use struct dma_struct for external interfacing in the future
 * and keep struct imx_dma_channel only as an extension to it.
 */

struct imx_dma_channel {
	const char *name;
	void (*irq_handler) (int, void *);
	void (*err_handler) (int, void *, int errcode);
	void (*prog_handler) (int, void *, struct scatterlist *);
	void *data;
	unsigned int dma_mode;
	struct scatterlist *sg;
	unsigned int resbytes;
	int dma_num;

	int in_use;

	u32 ccr_from_device;
	u32 ccr_to_device;

	struct timer_list watchdog;

	int hw_chaining;
};

static void __iomem *imx_dmav1_baseaddr;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}

static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];

static struct clk *dma_clk;

static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}

/*
 * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long now;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for unallocated channel %d\n",
		       __func__, channel);
		return 0;
	}

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(channel));

	imx_dmav1_writel(now, DMA_CNTR(channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", channel,
		 imx_dmav1_readl(DMA_DAR(channel)),
		 imx_dmav1_readl(DMA_SAR(channel)),
		 imx_dmav1_readl(DMA_CNTR(channel)));

	return now;
}

/**
 * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from
 * device transfer
 *
 * @channel: i.MX DMA channel number
 * @dma_address: the DMA/physical memory address of the linear data block
 *		to transfer
 * @dma_length: length of the data block in bytes
 * @dev_addr: physical device port address
 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
 *           or %DMA_MODE_WRITE from memory to the device
 *
 * Return value: if incorrect parameters are provided -%EINVAL.
 *		Zero indicates success.
 */
int
imx_dma_setup_single(int channel, dma_addr_t dma_address,
		     unsigned int dma_length, unsigned int dev_addr,
		     unsigned int dmamode)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	imxdma->sg = NULL;
	imxdma->dma_mode = dmamode;

	if (!dma_address) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
		       channel);
		return -EINVAL;
	}

	if (!dma_length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			"dev_addr=0x%08x for read\n",
			channel, __func__, (unsigned int)dma_address,
			dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(dma_address, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			"dev_addr=0x%08x for write\n",
			channel, __func__, (unsigned int)dma_address,
			dma_length, dev_addr);

		imx_dmav1_writel(dma_address, DMA_SAR(channel));
		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dmav1_writel(dma_length, DMA_CNTR(channel));

	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_single);
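
/*
 * Example usage (an illustrative sketch, not taken from an in-tree client;
 * the request line, FIFO address, buffer and length are hypothetical, and
 * the IMX_DMA_TYPE_x/IMX_DMA_MEMSIZE_x helpers are assumed to come from
 * <mach/dma-v1.h>):
 *
 *	ret = imx_dma_config_channel(channel,
 *			IMX_DMA_TYPE_FIFO | IMX_DMA_MEMSIZE_32,
 *			IMX_DMA_TYPE_LINEAR | IMX_DMA_MEMSIZE_32,
 *			DMA_REQ_MY_DEV, 0);
 *	imx_dma_config_burstlen(channel, 16);
 *	ret = imx_dma_setup_single(channel, buf_dma_addr, len,
 *			MY_DEV_FIFO_PADDR, DMA_MODE_WRITE);
 *	if (!ret)
 *		imx_dma_enable(channel);
 */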

/**
 * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
 * @channel: i.MX DMA channel number
 * @sg: pointer to the scatter-gather list/vector
 * @sgcount: number of entries in the scatter-gather list
 * @dma_length: total length of the transfer request in bytes
 * @dev_addr: physical device port address
 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
 *           or %DMA_MODE_WRITE from memory to the device
 *
 * The function sets up DMA channel state and registers to be ready for
 * transfer specified by provided parameters. The scatter-gather emulation
 * is set up according to the parameters.
 *
 * The full preparation of the transfer requires setup of more registers
 * by the caller before imx_dma_enable() can be called.
 *
 * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes
 *
 * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx
 *
 * %CCR(channel) has to specify transfer parameters, the following setting
 * is typical for linear or simple scatter-gather transfers if %DMA_MODE_READ
 * is specified
 *
 * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
 *
 * The typical setup for %DMA_MODE_WRITE is specified by the following
 * combination of options
 *
 * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
 *
 * Be careful here and do not mistakenly mix source and destination device
 * port size constants, they are really different:
 * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
 * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
 *
 * Return value: if incorrect parameters are provided -%EINVAL.
 * Zero indicates success.
 */
int
imx_dma_setup_sg(int channel,
		 struct scatterlist *sg, unsigned int sgcount,
		 unsigned int dma_length, unsigned int dev_addr,
		 unsigned int dmamode)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	if (imxdma->in_use)
		return -EBUSY;

	imxdma->sg = sg;
	imxdma->dma_mode = dmamode;
	imxdma->resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       channel);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			"dev_addr=0x%08x for read\n",
			channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			"dev_addr=0x%08x for write\n",
			channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dma_sg_next(channel, sg);

	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_sg);

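/*
 * Example usage (an illustrative sketch; the device, its FIFO address and
 * the transfer direction are hypothetical):
 *
 *	nents = dma_map_sg(dev, sgl, sgcount, DMA_FROM_DEVICE);
 *	ret = imx_dma_setup_sg(channel, sgl, nents, total_len,
 *			MY_DEV_FIFO_PADDR, DMA_MODE_READ);
 *	if (!ret)
 *		imx_dma_enable(channel);
 */

/**
 * imx_dma_config_channel - set up CCR templates and request line for a channel
 * @channel: i.MX DMA channel number
 * @config_port: mode/size bits describing the device port side, given in the
 *		source field positions (%IMX_DMA_TYPE_x | %IMX_DMA_MEMSIZE_x)
 * @config_mem: mode/size bits describing the memory side, same encoding
 * @dmareq: DMA request line source, 0 disables hardware requests
 * @hw_chaining: nonzero to use hardware chaining (only available on i.MX27)
 *
 * (Kernel-doc added editorially; the parameter semantics are derived from
 * the shifts applied in the function body below.)
 */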
int
imx_dma_config_channel(int channel, unsigned int config_port,
	unsigned int config_mem, unsigned int dmareq, int hw_chaining)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	u32 dreq = 0;

	imxdma->hw_chaining = 0;

	if (hw_chaining) {
		imxdma->hw_chaining = 1;
		if (!imx_dma_hw_chain(imxdma))
			return -EINVAL;
	}

	if (dmareq)
		dreq = CCR_REN;

	imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
	imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;

	imx_dmav1_writel(dmareq, DMA_RSSR(channel));

	return 0;
}
EXPORT_SYMBOL(imx_dma_config_channel);

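/**
 * imx_dma_config_burstlen - set transfer burst length for a channel
 * @channel: i.MX DMA channel number
 * @burstlen: burst length in bytes, 0 selects 64 bytes
 *
 * (Kernel-doc added editorially; the 0-means-64-bytes rule follows the
 * %BLR description in the imx_dma_setup_sg() documentation above.)
 */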
void imx_dma_config_burstlen(int channel, unsigned int burstlen)
{
	imx_dmav1_writel(burstlen, DMA_BLR(channel));
}
EXPORT_SYMBOL(imx_dma_config_burstlen);

/**
 * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification
 * handlers
 * @channel: i.MX DMA channel number
 * @irq_handler: the pointer to the function called if the transfer
 *		ends successfully
 * @err_handler: the pointer to the function called if a premature
 *		end caused by an error occurs
 * @data: user specified value to be passed to the handlers
 */
int
imx_dma_setup_handlers(int channel,
		       void (*irq_handler) (int, void *),
		       void (*err_handler) (int, void *, int),
		       void *data)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for unallocated channel %d\n",
		       __func__, channel);
		return -ENODEV;
	}

	local_irq_save(flags);
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdma->irq_handler = irq_handler;
	imxdma->err_handler = err_handler;
	imxdma->data = data;
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_handlers);
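
/*
 * Example usage (an illustrative sketch; my_dma_done() and my_dma_err()
 * are hypothetical client callbacks):
 *
 *	static void my_dma_done(int channel, void *data) { ... }
 *	static void my_dma_err(int channel, void *data, int errcode) { ... }
 *
 *	ret = imx_dma_setup_handlers(channel, my_dma_done, my_dma_err, priv);
 */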

/**
 * imx_dma_setup_progression_handler - setup i.MX DMA channel progression
 * handler
 * @channel: i.MX DMA channel number
 * @prog_handler: the pointer to the function called when the transfer
 *		progresses to the next scatter-gather chunk
 */
int
imx_dma_setup_progression_handler(int channel,
			void (*prog_handler) (int, void*, struct scatterlist*))
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for unallocated channel %d\n",
		       __func__, channel);
		return -ENODEV;
	}

	local_irq_save(flags);
	imxdma->prog_handler = prog_handler;
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_progression_handler);

/**
 * imx_dma_enable - function to start i.MX DMA channel operation
 * @channel: i.MX DMA channel number
 *
 * The channel has to be allocated by the driver through the imx_dma_request()
 * or imx_dma_request_by_prio() function.
 * The transfer parameters have to be set to the channel registers through
 * a call of the imx_dma_setup_single() or imx_dma_setup_sg() function,
 * and the registers %BLR(channel), %RSSR(channel) and %CCR(channel) have to
 * be set prior to this function call by the channel user.
 */
void imx_dma_enable(int channel)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for unallocated channel %d\n",
		       __func__, channel);
		return;
	}

	if (imxdma->in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			imxdma->sg && imx_dma_hw_chain(imxdma)) {
		imxdma->sg = sg_next(imxdma->sg);
		if (imxdma->sg) {
			u32 tmp;
			imx_dma_sg_next(channel, imxdma->sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_CCR(channel));
		}
	}
	imxdma->in_use = 1;

	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_enable);
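
/*
 * Typical channel lifecycle (an illustrative sketch; the client name,
 * priority, setup values and callbacks are hypothetical):
 *
 *	channel = imx_dma_request_by_prio("my-dev", DMA_PRIO_HIGH);
 *	imx_dma_config_channel(channel, ...);
 *	imx_dma_config_burstlen(channel, ...);
 *	imx_dma_setup_handlers(channel, my_dma_done, my_dma_err, priv);
 *	imx_dma_setup_single(channel, ...);  (or imx_dma_setup_sg())
 *	imx_dma_enable(channel);
 *	...
 *	imx_dma_free(channel);
 */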

/**
 * imx_dma_disable - stop, finish i.MX DMA channel operation
 * @channel: i.MX DMA channel number
 */
void imx_dma_disable(int channel)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imx_dma_hw_chain(imxdma))
		del_timer(&imxdma->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
			DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdma->in_use = 0;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_disable);

static void imx_dma_watchdog(unsigned long chno)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[chno];

	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	imxdma->sg = NULL;

	if (imxdma->err_handler)
		imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
}

static irqreturn_t dma_err_handler(int irq, void *dev_id)
{
	int i, disr;
	struct imx_dma_channel *imxdma;
	unsigned int err_mask;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR)  |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		imxdma = &imx_dma_channels[i];
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		if (imxdma->name && imxdma->err_handler) {
			imxdma->err_handler(i, imxdma->data, errcode);
			continue;
		}

		imx_dma_channels[i].sg = NULL;

		printk(KERN_WARNING
		       "DMA error on channel %d (%s) -%s%s%s%s\n",
		       i, imxdma->name,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

static void dma_irq_handle_channel(int chno)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[chno];

	if (!imxdma->name) {
		/*
		 * IRQ for an unregistered DMA channel:
		 * let's report it and bail out.
		 */
		printk(KERN_WARNING
		       "spurious IRQ for DMA channel %d\n", chno);
		return;
	}

	if (imxdma->sg) {
		u32 tmp;
		struct scatterlist *current_sg = imxdma->sg;
		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {
			imx_dma_sg_next(chno, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imx_dma_hw_chain(imxdma)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(tmp, DMA_CCR(chno));
			} else {
				/* Pulse CEN low; the write below restarts
				 * the channel for the next chunk.
				 */
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma->prog_handler)
				imxdma->prog_handler(chno, imxdma->data,
						current_sg);

			return;
		}

		if (imx_dma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	if (imxdma->irq_handler)
		imxdma->irq_handler(chno, imxdma->data);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		dma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		     disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(i);
	}

	return IRQ_HANDLED;
}

/**
 * imx_dma_request - request/allocate specified channel number
 * @channel: i.MX DMA channel number
 * @name: the driver's/caller's own non-%NULL identification
 */
int imx_dma_request(int channel, const char *name)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;
	int ret = 0;

	/* basic sanity checks */
	if (!name)
		return -EINVAL;

	if (channel >= IMX_DMA_CHANNELS) {
		printk(KERN_CRIT "%s: called for non-existent channel %d\n",
		       __func__, channel);
		return -EINVAL;
	}

	local_irq_save(flags);
	if (imxdma->name) {
		local_irq_restore(flags);
		return -EBUSY;
	}
	memset(imxdma, 0, sizeof(*imxdma));
	imxdma->name = name;
	local_irq_restore(flags); /* request_irq() can block */

	if (cpu_is_mx21() || cpu_is_mx27()) {
		ret = request_irq(MX2x_INT_DMACH0 + channel,
				dma_irq_handler, 0, "DMA", NULL);
		if (ret) {
			imxdma->name = NULL;
			pr_crit("Can't register IRQ %d for DMA channel %d\n",
					MX2x_INT_DMACH0 + channel, channel);
			return ret;
		}
		init_timer(&imxdma->watchdog);
		imxdma->watchdog.function = &imx_dma_watchdog;
		imxdma->watchdog.data = channel;
	}

	return ret;
}
EXPORT_SYMBOL(imx_dma_request);

/**
 * imx_dma_free - release previously acquired channel
 * @channel: i.MX DMA channel number
 */
void imx_dma_free(int channel)
{
	unsigned long flags;
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	if (!imxdma->name) {
		printk(KERN_CRIT
		       "%s: trying to free an already free channel %d\n",
		       __func__, channel);
		return;
	}

	local_irq_save(flags);
	/* Disable interrupts */
	imx_dma_disable(channel);
	imxdma->name = NULL;

	if (cpu_is_mx21() || cpu_is_mx27())
		free_irq(MX2x_INT_DMACH0 + channel, NULL);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_free);

/**
 * imx_dma_request_by_prio - find and request one of the free channels best
 * suiting the requested priority
 * @name: the driver's/caller's own non-%NULL identification
 * @prio: requested priority group, %DMA_PRIO_HIGH, %DMA_PRIO_MEDIUM or
 *	  %DMA_PRIO_LOW
 *
 * This function tries to find a free channel in the specified priority group.
 * If that priority cannot be achieved, it looks for a free channel in the
 * higher and then in the lower priority groups.
 *
 * Return value: If there is no free channel to allocate, -%ENODEV is returned.
 *               On successful allocation the channel number is returned.
 */
int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
{
	int i;
	int best;

	switch (prio) {
	case (DMA_PRIO_HIGH):
		best = 8;
		break;
	case (DMA_PRIO_MEDIUM):
		best = 4;
		break;
	case (DMA_PRIO_LOW):
	default:
		best = 0;
		break;
	}

	for (i = best; i < IMX_DMA_CHANNELS; i++)
		if (!imx_dma_request(i, name))
			return i;

	for (i = best - 1; i >= 0; i--)
		if (!imx_dma_request(i, name))
			return i;

	printk(KERN_ERR "%s: no free DMA channel found\n", __func__);

	return -ENODEV;
}
EXPORT_SYMBOL(imx_dma_request_by_prio);
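
/*
 * Example usage (an illustrative sketch; the client name is hypothetical):
 *
 *	channel = imx_dma_request_by_prio("my-dev", DMA_PRIO_HIGH);
 *	if (channel < 0)
 *		return channel;
 */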

static int __init imx_dma_init(void)
{
	int ret = 0;
	int i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return 0;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL);
		if (ret) {
			pr_crit("Wow!  Can't register IRQ for DMA\n");
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL);
		if (ret) {
			pr_crit("Wow!  Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		imx_dma_channels[i].sg = NULL;
		imx_dma_channels[i].dma_num = i;
	}

	return ret;
}

arch_initcall(imx_dma_init);