1 /* linux/arch/arm/common/pl330.c
2  *
3  * Copyright (C) 2010 Samsung Electronics Co Ltd.
4  *	Jaswinder Singh <jassi.brar@samsung.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/string.h>
26 #include <linux/io.h>
27 #include <linux/delay.h>
28 #include <linux/interrupt.h>
29 #include <linux/dma-mapping.h>
30 
31 #include <asm/hardware/pl330.h>
32 
33 /* Register and Bit field Definitions */
34 #define DS		0x0
35 #define DS_ST_STOP	0x0
36 #define DS_ST_EXEC	0x1
37 #define DS_ST_CMISS	0x2
38 #define DS_ST_UPDTPC	0x3
39 #define DS_ST_WFE	0x4
40 #define DS_ST_ATBRR	0x5
41 #define DS_ST_QBUSY	0x6
42 #define DS_ST_WFP	0x7
43 #define DS_ST_KILL	0x8
44 #define DS_ST_CMPLT	0x9
45 #define DS_ST_FLTCMP	0xe
46 #define DS_ST_FAULT	0xf
47 
48 #define DPC		0x4
49 #define INTEN		0x20
50 #define ES		0x24
51 #define INTSTATUS	0x28
52 #define INTCLR		0x2c
53 #define FSM		0x30
54 #define FSC		0x34
55 #define FTM		0x38
56 
57 #define _FTC		0x40
58 #define FTC(n)		(_FTC + (n)*0x4)
59 
60 #define _CS		0x100
61 #define CS(n)		(_CS + (n)*0x8)
62 #define CS_CNS		(1 << 21)
63 
64 #define _CPC		0x104
65 #define CPC(n)		(_CPC + (n)*0x8)
66 
67 #define _SA		0x400
68 #define SA(n)		(_SA + (n)*0x20)
69 
70 #define _DA		0x404
71 #define DA(n)		(_DA + (n)*0x20)
72 
73 #define _CC		0x408
74 #define CC(n)		(_CC + (n)*0x20)
75 
76 #define CC_SRCINC	(1 << 0)
77 #define CC_DSTINC	(1 << 14)
78 #define CC_SRCPRI	(1 << 8)
79 #define CC_DSTPRI	(1 << 22)
80 #define CC_SRCNS	(1 << 9)
81 #define CC_DSTNS	(1 << 23)
82 #define CC_SRCIA	(1 << 10)
83 #define CC_DSTIA	(1 << 24)
84 #define CC_SRCBRSTLEN_SHFT	4
85 #define CC_DSTBRSTLEN_SHFT	18
86 #define CC_SRCBRSTSIZE_SHFT	1
87 #define CC_DSTBRSTSIZE_SHFT	15
88 #define CC_SRCCCTRL_SHFT	11
89 #define CC_SRCCCTRL_MASK	0x7
90 #define CC_DSTCCTRL_SHFT	25
91 #define CC_DSTCCTRL_MASK	0x7
92 #define CC_SWAP_SHFT	28
93 
94 #define _LC0		0x40c
95 #define LC0(n)		(_LC0 + (n)*0x20)
96 
97 #define _LC1		0x410
98 #define LC1(n)		(_LC1 + (n)*0x20)
99 
100 #define DBGSTATUS	0xd00
101 #define DBG_BUSY	(1 << 0)
102 
103 #define DBGCMD		0xd04
104 #define DBGINST0	0xd08
105 #define DBGINST1	0xd0c
106 
107 #define CR0		0xe00
108 #define CR1		0xe04
109 #define CR2		0xe08
110 #define CR3		0xe0c
111 #define CR4		0xe10
112 #define CRD		0xe14
113 
114 #define PERIPH_ID	0xfe0
115 #define PCELL_ID	0xff0
116 
117 #define CR0_PERIPH_REQ_SET	(1 << 0)
118 #define CR0_BOOT_EN_SET		(1 << 1)
119 #define CR0_BOOT_MAN_NS		(1 << 2)
120 #define CR0_NUM_CHANS_SHIFT	4
121 #define CR0_NUM_CHANS_MASK	0x7
122 #define CR0_NUM_PERIPH_SHIFT	12
123 #define CR0_NUM_PERIPH_MASK	0x1f
124 #define CR0_NUM_EVENTS_SHIFT	17
125 #define CR0_NUM_EVENTS_MASK	0x1f
126 
127 #define CR1_ICACHE_LEN_SHIFT	0
128 #define CR1_ICACHE_LEN_MASK	0x7
129 #define CR1_NUM_ICACHELINES_SHIFT	4
130 #define CR1_NUM_ICACHELINES_MASK	0xf
131 
132 #define CRD_DATA_WIDTH_SHIFT	0
133 #define CRD_DATA_WIDTH_MASK	0x7
134 #define CRD_WR_CAP_SHIFT	4
135 #define CRD_WR_CAP_MASK		0x7
136 #define CRD_WR_Q_DEP_SHIFT	8
137 #define CRD_WR_Q_DEP_MASK	0xf
138 #define CRD_RD_CAP_SHIFT	12
139 #define CRD_RD_CAP_MASK		0x7
140 #define CRD_RD_Q_DEP_SHIFT	16
141 #define CRD_RD_Q_DEP_MASK	0xf
142 #define CRD_DATA_BUFF_SHIFT	20
143 #define CRD_DATA_BUFF_MASK	0x3ff
144 
145 #define	PART		0x330
146 #define DESIGNER	0x41
147 #define REVISION	0x0
148 #define INTEG_CFG	0x0
149 #define PERIPH_ID_VAL	((PART << 0) | (DESIGNER << 12))
150 
151 #define PCELL_ID_VAL	0xb105f00d
152 
153 #define PL330_STATE_STOPPED		(1 << 0)
154 #define PL330_STATE_EXECUTING		(1 << 1)
155 #define PL330_STATE_WFE			(1 << 2)
156 #define PL330_STATE_FAULTING		(1 << 3)
157 #define PL330_STATE_COMPLETING		(1 << 4)
158 #define PL330_STATE_WFP			(1 << 5)
159 #define PL330_STATE_KILLING		(1 << 6)
160 #define PL330_STATE_FAULT_COMPLETING	(1 << 7)
161 #define PL330_STATE_CACHEMISS		(1 << 8)
162 #define PL330_STATE_UPDTPC		(1 << 9)
163 #define PL330_STATE_ATBARRIER		(1 << 10)
164 #define PL330_STATE_QUEUEBUSY		(1 << 11)
165 #define PL330_STATE_INVALID		(1 << 15)
166 
167 #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
168 				| PL330_STATE_WFE | PL330_STATE_FAULTING)
169 
170 #define CMD_DMAADDH	0x54
171 #define CMD_DMAEND	0x00
172 #define CMD_DMAFLUSHP	0x35
173 #define CMD_DMAGO	0xa0
174 #define CMD_DMALD	0x04
175 #define CMD_DMALDP	0x25
176 #define CMD_DMALP	0x20
177 #define CMD_DMALPEND	0x28
178 #define CMD_DMAKILL	0x01
179 #define CMD_DMAMOV	0xbc
180 #define CMD_DMANOP	0x18
181 #define CMD_DMARMB	0x12
182 #define CMD_DMASEV	0x34
183 #define CMD_DMAST	0x08
184 #define CMD_DMASTP	0x29
185 #define CMD_DMASTZ	0x0c
186 #define CMD_DMAWFE	0x36
187 #define CMD_DMAWFP	0x30
188 #define CMD_DMAWMB	0x13
189 
190 #define SZ_DMAADDH	3
191 #define SZ_DMAEND	1
192 #define SZ_DMAFLUSHP	2
193 #define SZ_DMALD	1
194 #define SZ_DMALDP	2
195 #define SZ_DMALP	2
196 #define SZ_DMALPEND	2
197 #define SZ_DMAKILL	1
198 #define SZ_DMAMOV	6
199 #define SZ_DMANOP	1
200 #define SZ_DMARMB	1
201 #define SZ_DMASEV	2
202 #define SZ_DMAST	1
203 #define SZ_DMASTP	2
204 #define SZ_DMASTZ	1
205 #define SZ_DMAWFE	2
206 #define SZ_DMAWFP	2
207 #define SZ_DMAWMB	1
208 #define SZ_DMAGO	6
209 
210 #define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
211 #define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
212 
213 #define BYTE_TO_BURST(b, ccr)  ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
214 #define BURST_TO_BYTE(c, ccr)  ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
215 
216 /*
217  * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
218  * at 1byte/burst for P<->M and M<->M respectively.
219  * For a typical scenario, at 1word/burst, 10MB and 20MB xfers per req
220  * should be enough for P<->M and M<->M respectively.
221  */
222 #define MCODE_BUFF_PER_REQ	256
223 
224 /* If the _pl330_req is available to the client */
225 #define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
226 
227 /* Use this _only_ to wait on transient states */
228 #define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
229 
230 #ifdef PL330_DEBUG_MCGEN
231 static unsigned cmd_line;
232 #define PL330_DBGCMD_DUMP(off, x...)	do { \
233 						printk("%x:", cmd_line); \
234 						printk(x); \
235 						cmd_line += off; \
236 					} while (0)
237 #define PL330_DBGMC_START(addr)		(cmd_line = addr)
238 #else
239 #define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
240 #define PL330_DBGMC_START(addr)		do {} while (0)
241 #endif
242 
243 struct _xfer_spec {
244 	u32 ccr;
245 	struct pl330_req *r;
246 	struct pl330_xfer *x;
247 };
248 
249 enum dmamov_dst {
250 	SAR = 0,
251 	CCR,
252 	DAR,
253 };
254 
255 enum pl330_dst {
256 	SRC = 0,
257 	DST,
258 };
259 
260 enum pl330_cond {
261 	SINGLE,
262 	BURST,
263 	ALWAYS,
264 };
265 
266 struct _pl330_req {
267 	u32 mc_bus;
268 	void *mc_cpu;
269 	/* Number of bytes taken to setup MC for the req */
270 	u32 mc_len;
271 	struct pl330_req *r;
272 	/* Hook to attach to DMAC's list of reqs with due callback */
273 	struct list_head rqd;
274 };
275 
276 /* ToBeDone for tasklet */
277 struct _pl330_tbd {
278 	bool reset_dmac;
279 	bool reset_mngr;
280 	u8 reset_chan;
281 };
282 
283 /* A DMAC Thread */
284 struct pl330_thread {
285 	u8 id;
286 	int ev;
287 	/* If the channel is not yet acquired by any client */
288 	bool free;
289 	/* Parent DMAC */
290 	struct pl330_dmac *dmac;
291 	/* Only two at a time */
292 	struct _pl330_req req[2];
293 	/* Index of the last enqueued request */
294 	unsigned lstenq;
295 	/* Index of the last submitted request or -1 if the DMA is stopped */
296 	int req_running;
297 };
298 
299 enum pl330_dmac_state {
300 	UNINIT,
301 	INIT,
302 	DYING,
303 };
304 
305 /* A DMAC */
306 struct pl330_dmac {
307 	spinlock_t		lock;
308 	/* Holds list of reqs with due callbacks */
309 	struct list_head	req_done;
310 	/* Pointer to platform specific stuff */
311 	struct pl330_info	*pinfo;
312 	/* Maximum possible events/irqs */
313 	int			events[32];
314 	/* BUS address of MicroCode buffer */
315 	u32			mcode_bus;
316 	/* CPU address of MicroCode buffer */
317 	void			*mcode_cpu;
318 	/* List of all Channel threads */
319 	struct pl330_thread	*channels;
320 	/* Pointer to the MANAGER thread */
321 	struct pl330_thread	*manager;
322 	/* To handle bad news in interrupt */
323 	struct tasklet_struct	tasks;
324 	struct _pl330_tbd	dmac_tbd;
325 	/* State of DMAC operation */
326 	enum pl330_dmac_state	state;
327 };
328 
329 static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
330 {
331 	if (r && r->xfer_cb)
332 		r->xfer_cb(r->token, err);
333 }
334 
335 static inline bool _queue_empty(struct pl330_thread *thrd)
336 {
337 	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
338 		? true : false;
339 }
340 
341 static inline bool _queue_full(struct pl330_thread *thrd)
342 {
343 	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
344 		? false : true;
345 }
346 
347 static inline bool is_manager(struct pl330_thread *thrd)
348 {
349 	struct pl330_dmac *pl330 = thrd->dmac;
350 
351 	/* MANAGER is indexed at the end */
352 	if (thrd->id == pl330->pinfo->pcfg.num_chan)
353 		return true;
354 	else
355 		return false;
356 }
357 
358 /* If manager of the thread is in Non-Secure mode */
359 static inline bool _manager_ns(struct pl330_thread *thrd)
360 {
361 	struct pl330_dmac *pl330 = thrd->dmac;
362 
363 	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
364 }
365 
366 static inline u32 get_id(struct pl330_info *pi, u32 off)
367 {
368 	void __iomem *regs = pi->base;
369 	u32 id = 0;
370 
371 	id |= (readb(regs + off + 0x0) << 0);
372 	id |= (readb(regs + off + 0x4) << 8);
373 	id |= (readb(regs + off + 0x8) << 16);
374 	id |= (readb(regs + off + 0xc) << 24);
375 
376 	return id;
377 }
378 
379 static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
380 		enum pl330_dst da, u16 val)
381 {
382 	if (dry_run)
383 		return SZ_DMAADDH;
384 
385 	buf[0] = CMD_DMAADDH;
386 	buf[0] |= (da << 1);
387 	*((u16 *)&buf[1]) = val;
388 
389 	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
390 		da == 1 ? "DA" : "SA", val);
391 
392 	return SZ_DMAADDH;
393 }
394 
395 static inline u32 _emit_END(unsigned dry_run, u8 buf[])
396 {
397 	if (dry_run)
398 		return SZ_DMAEND;
399 
400 	buf[0] = CMD_DMAEND;
401 
402 	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
403 
404 	return SZ_DMAEND;
405 }
406 
407 static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
408 {
409 	if (dry_run)
410 		return SZ_DMAFLUSHP;
411 
412 	buf[0] = CMD_DMAFLUSHP;
413 
414 	peri &= 0x1f;
415 	peri <<= 3;
416 	buf[1] = peri;
417 
418 	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
419 
420 	return SZ_DMAFLUSHP;
421 }
422 
423 static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
424 {
425 	if (dry_run)
426 		return SZ_DMALD;
427 
428 	buf[0] = CMD_DMALD;
429 
430 	if (cond == SINGLE)
431 		buf[0] |= (0 << 1) | (1 << 0);
432 	else if (cond == BURST)
433 		buf[0] |= (1 << 1) | (1 << 0);
434 
435 	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
436 		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
437 
438 	return SZ_DMALD;
439 }
440 
441 static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
442 		enum pl330_cond cond, u8 peri)
443 {
444 	if (dry_run)
445 		return SZ_DMALDP;
446 
447 	buf[0] = CMD_DMALDP;
448 
449 	if (cond == BURST)
450 		buf[0] |= (1 << 1);
451 
452 	peri &= 0x1f;
453 	peri <<= 3;
454 	buf[1] = peri;
455 
456 	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
457 		cond == SINGLE ? 'S' : 'B', peri >> 3);
458 
459 	return SZ_DMALDP;
460 }
461 
462 static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
463 		unsigned loop, u8 cnt)
464 {
465 	if (dry_run)
466 		return SZ_DMALP;
467 
468 	buf[0] = CMD_DMALP;
469 
470 	if (loop)
471 		buf[0] |= (1 << 1);
472 
473 	cnt--; /* DMAC increments by 1 internally */
474 	buf[1] = cnt;
475 
476 	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
477 
478 	return SZ_DMALP;
479 }
480 
481 struct _arg_LPEND {
482 	enum pl330_cond cond;
483 	bool forever;
484 	unsigned loop;
485 	u8 bjump;
486 };
487 
488 static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
489 		const struct _arg_LPEND *arg)
490 {
491 	enum pl330_cond cond = arg->cond;
492 	bool forever = arg->forever;
493 	unsigned loop = arg->loop;
494 	u8 bjump = arg->bjump;
495 
496 	if (dry_run)
497 		return SZ_DMALPEND;
498 
499 	buf[0] = CMD_DMALPEND;
500 
501 	if (loop)
502 		buf[0] |= (1 << 2);
503 
504 	if (!forever)
505 		buf[0] |= (1 << 4);
506 
507 	if (cond == SINGLE)
508 		buf[0] |= (0 << 1) | (1 << 0);
509 	else if (cond == BURST)
510 		buf[0] |= (1 << 1) | (1 << 0);
511 
512 	buf[1] = bjump;
513 
514 	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
515 			forever ? "FE" : "END",
516 			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
517 			loop ? '1' : '0',
518 			bjump);
519 
520 	return SZ_DMALPEND;
521 }
522 
523 static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
524 {
525 	if (dry_run)
526 		return SZ_DMAKILL;
527 
528 	buf[0] = CMD_DMAKILL;
529 
530 	return SZ_DMAKILL;
531 }
532 
533 static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
534 		enum dmamov_dst dst, u32 val)
535 {
536 	if (dry_run)
537 		return SZ_DMAMOV;
538 
539 	buf[0] = CMD_DMAMOV;
540 	buf[1] = dst;
541 	*((u32 *)&buf[2]) = val;
542 
543 	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
544 		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
545 
546 	return SZ_DMAMOV;
547 }
548 
549 static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
550 {
551 	if (dry_run)
552 		return SZ_DMANOP;
553 
554 	buf[0] = CMD_DMANOP;
555 
556 	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
557 
558 	return SZ_DMANOP;
559 }
560 
561 static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
562 {
563 	if (dry_run)
564 		return SZ_DMARMB;
565 
566 	buf[0] = CMD_DMARMB;
567 
568 	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
569 
570 	return SZ_DMARMB;
571 }
572 
573 static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
574 {
575 	if (dry_run)
576 		return SZ_DMASEV;
577 
578 	buf[0] = CMD_DMASEV;
579 
580 	ev &= 0x1f;
581 	ev <<= 3;
582 	buf[1] = ev;
583 
584 	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
585 
586 	return SZ_DMASEV;
587 }
588 
589 static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
590 {
591 	if (dry_run)
592 		return SZ_DMAST;
593 
594 	buf[0] = CMD_DMAST;
595 
596 	if (cond == SINGLE)
597 		buf[0] |= (0 << 1) | (1 << 0);
598 	else if (cond == BURST)
599 		buf[0] |= (1 << 1) | (1 << 0);
600 
601 	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
602 		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
603 
604 	return SZ_DMAST;
605 }
606 
607 static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
608 		enum pl330_cond cond, u8 peri)
609 {
610 	if (dry_run)
611 		return SZ_DMASTP;
612 
613 	buf[0] = CMD_DMASTP;
614 
615 	if (cond == BURST)
616 		buf[0] |= (1 << 1);
617 
618 	peri &= 0x1f;
619 	peri <<= 3;
620 	buf[1] = peri;
621 
622 	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
623 		cond == SINGLE ? 'S' : 'B', peri >> 3);
624 
625 	return SZ_DMASTP;
626 }
627 
628 static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
629 {
630 	if (dry_run)
631 		return SZ_DMASTZ;
632 
633 	buf[0] = CMD_DMASTZ;
634 
635 	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
636 
637 	return SZ_DMASTZ;
638 }
639 
640 static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
641 		unsigned invalidate)
642 {
643 	if (dry_run)
644 		return SZ_DMAWFE;
645 
646 	buf[0] = CMD_DMAWFE;
647 
648 	ev &= 0x1f;
649 	ev <<= 3;
650 	buf[1] = ev;
651 
652 	if (invalidate)
653 		buf[1] |= (1 << 1);
654 
655 	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
656 		ev >> 3, invalidate ? ", I" : "");
657 
658 	return SZ_DMAWFE;
659 }
660 
661 static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
662 		enum pl330_cond cond, u8 peri)
663 {
664 	if (dry_run)
665 		return SZ_DMAWFP;
666 
667 	buf[0] = CMD_DMAWFP;
668 
669 	if (cond == SINGLE)
670 		buf[0] |= (0 << 1) | (0 << 0);
671 	else if (cond == BURST)
672 		buf[0] |= (1 << 1) | (0 << 0);
673 	else
674 		buf[0] |= (0 << 1) | (1 << 0);
675 
676 	peri &= 0x1f;
677 	peri <<= 3;
678 	buf[1] = peri;
679 
680 	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
681 		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
682 
683 	return SZ_DMAWFP;
684 }
685 
686 static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
687 {
688 	if (dry_run)
689 		return SZ_DMAWMB;
690 
691 	buf[0] = CMD_DMAWMB;
692 
693 	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
694 
695 	return SZ_DMAWMB;
696 }
697 
698 struct _arg_GO {
699 	u8 chan;
700 	u32 addr;
701 	unsigned ns;
702 };
703 
704 static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
705 		const struct _arg_GO *arg)
706 {
707 	u8 chan = arg->chan;
708 	u32 addr = arg->addr;
709 	unsigned ns = arg->ns;
710 
711 	if (dry_run)
712 		return SZ_DMAGO;
713 
714 	buf[0] = CMD_DMAGO;
715 	buf[0] |= (ns << 1);
716 
717 	buf[1] = chan & 0x7;
718 
719 	*((u32 *)&buf[2]) = addr;
720 
721 	return SZ_DMAGO;
722 }
723 
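/* Rough number of busy-wait iterations corresponding to 't' msecs,
 * derived from the loops_per_jiffy calibration.
 */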
724 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * (t))
725 
726 /* Returns Time-Out */
727 static bool _until_dmac_idle(struct pl330_thread *thrd)
728 {
729 	void __iomem *regs = thrd->dmac->pinfo->base;
730 	unsigned long loops = msecs_to_loops(5);
731 
732 	do {
733 		/* Until Manager is Idle */
734 		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
735 			break;
736 
737 		cpu_relax();
738 	} while (--loops);
739 
740 	if (!loops)
741 		return true;
742 
743 	return false;
744 }
745 
746 static inline void _execute_DBGINSN(struct pl330_thread *thrd,
747 		u8 insn[], bool as_manager)
748 {
749 	void __iomem *regs = thrd->dmac->pinfo->base;
750 	u32 val;
751 
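	/* Opcode byte 0 goes into DBGINST0[23:16], byte 1 into DBGINST0[31:24] */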
752 	val = (insn[0] << 16) | (insn[1] << 24);
753 	if (!as_manager) {
754 		val |= (1 << 0);
755 		val |= (thrd->id << 8); /* Channel Number */
756 	}
757 	writel(val, regs + DBGINST0);
758 
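	/* The remaining four instruction bytes form DBGINST1 */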
759 	val = *((u32 *)&insn[2]);
760 	writel(val, regs + DBGINST1);
761 
762 	/* If timed out due to halted state-machine */
763 	if (_until_dmac_idle(thrd)) {
764 		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
765 		return;
766 	}
767 
768 	/* Get going */
769 	writel(0, regs + DBGCMD);
770 }
771 
772 /*
773  * Mark a _pl330_req as free.
774  * We do it by writing DMAEND as the first instruction
775  * because no valid request is going to have DMAEND as
776  * its first instruction to execute.
777  */
778 static void mark_free(struct pl330_thread *thrd, int idx)
779 {
780 	struct _pl330_req *req = &thrd->req[idx];
781 
782 	_emit_END(0, req->mc_cpu);
783 	req->mc_len = 0;
784 
785 	thrd->req_running = -1;
786 }
787 
788 static inline u32 _state(struct pl330_thread *thrd)
789 {
790 	void __iomem *regs = thrd->dmac->pinfo->base;
791 	u32 val;
792 
793 	if (is_manager(thrd))
794 		val = readl(regs + DS) & 0xf;
795 	else
796 		val = readl(regs + CS(thrd->id)) & 0xf;
797 
798 	switch (val) {
799 	case DS_ST_STOP:
800 		return PL330_STATE_STOPPED;
801 	case DS_ST_EXEC:
802 		return PL330_STATE_EXECUTING;
803 	case DS_ST_CMISS:
804 		return PL330_STATE_CACHEMISS;
805 	case DS_ST_UPDTPC:
806 		return PL330_STATE_UPDTPC;
807 	case DS_ST_WFE:
808 		return PL330_STATE_WFE;
809 	case DS_ST_FAULT:
810 		return PL330_STATE_FAULTING;
811 	case DS_ST_ATBRR:
812 		if (is_manager(thrd))
813 			return PL330_STATE_INVALID;
814 		else
815 			return PL330_STATE_ATBARRIER;
816 	case DS_ST_QBUSY:
817 		if (is_manager(thrd))
818 			return PL330_STATE_INVALID;
819 		else
820 			return PL330_STATE_QUEUEBUSY;
821 	case DS_ST_WFP:
822 		if (is_manager(thrd))
823 			return PL330_STATE_INVALID;
824 		else
825 			return PL330_STATE_WFP;
826 	case DS_ST_KILL:
827 		if (is_manager(thrd))
828 			return PL330_STATE_INVALID;
829 		else
830 			return PL330_STATE_KILLING;
831 	case DS_ST_CMPLT:
832 		if (is_manager(thrd))
833 			return PL330_STATE_INVALID;
834 		else
835 			return PL330_STATE_COMPLETING;
836 	case DS_ST_FLTCMP:
837 		if (is_manager(thrd))
838 			return PL330_STATE_INVALID;
839 		else
840 			return PL330_STATE_FAULT_COMPLETING;
841 	default:
842 		return PL330_STATE_INVALID;
843 	}
844 }
845 
846 static void _stop(struct pl330_thread *thrd)
847 {
848 	void __iomem *regs = thrd->dmac->pinfo->base;
849 	u8 insn[6] = {0, 0, 0, 0, 0, 0};
850 
851 	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
852 		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
853 
854 	/* Return if nothing needs to be done */
855 	if (_state(thrd) == PL330_STATE_COMPLETING
856 		  || _state(thrd) == PL330_STATE_KILLING
857 		  || _state(thrd) == PL330_STATE_STOPPED)
858 		return;
859 
860 	_emit_KILL(0, insn);
861 
862 	/* Stop generating interrupts for SEV */
863 	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
864 
865 	_execute_DBGINSN(thrd, insn, is_manager(thrd));
866 }
867 
868 /* Start executing the next pending req, if any, of thread 'thrd' */
869 static bool _trigger(struct pl330_thread *thrd)
870 {
871 	void __iomem *regs = thrd->dmac->pinfo->base;
872 	struct _pl330_req *req;
873 	struct pl330_req *r;
874 	struct _arg_GO go;
875 	unsigned ns;
876 	u8 insn[6] = {0, 0, 0, 0, 0, 0};
877 	int idx;
878 
879 	/* Return if already ACTIVE */
880 	if (_state(thrd) != PL330_STATE_STOPPED)
881 		return true;
882 
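	/* Prefer the slot that was not enqueued last, i.e. the older pending req */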
883 	idx = 1 - thrd->lstenq;
884 	if (!IS_FREE(&thrd->req[idx]))
885 		req = &thrd->req[idx];
886 	else {
887 		idx = thrd->lstenq;
888 		if (!IS_FREE(&thrd->req[idx]))
889 			req = &thrd->req[idx];
890 		else
891 			req = NULL;
892 	}
893 
894 	/* Return if no request */
895 	if (!req || !req->r)
896 		return true;
897 
898 	r = req->r;
899 
900 	if (r->cfg)
901 		ns = r->cfg->nonsecure ? 1 : 0;
902 	else if (readl(regs + CS(thrd->id)) & CS_CNS)
903 		ns = 1;
904 	else
905 		ns = 0;
906 
907 	/* See 'Abort Sources' point-4 at Page 2-25 */
908 	if (_manager_ns(thrd) && !ns)
909 		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
910 			__func__, __LINE__);
911 
912 	go.chan = thrd->id;
913 	go.addr = req->mc_bus;
914 	go.ns = ns;
915 	_emit_GO(0, insn, &go);
916 
917 	/* Set to generate interrupts for SEV */
918 	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
919 
920 	/* Only manager can execute GO */
921 	_execute_DBGINSN(thrd, insn, true);
922 
923 	thrd->req_running = idx;
924 
925 	return true;
926 }
927 
928 static bool _start(struct pl330_thread *thrd)
929 {
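	/* The transient states below intentionally fall through until a stable state is reached */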
930 	switch (_state(thrd)) {
931 	case PL330_STATE_FAULT_COMPLETING:
932 		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
933 
934 		if (_state(thrd) == PL330_STATE_KILLING)
935 			UNTIL(thrd, PL330_STATE_STOPPED)
936 
937 	case PL330_STATE_FAULTING:
938 		_stop(thrd);
939 
940 	case PL330_STATE_KILLING:
941 	case PL330_STATE_COMPLETING:
942 		UNTIL(thrd, PL330_STATE_STOPPED)
943 
944 	case PL330_STATE_STOPPED:
945 		return _trigger(thrd);
946 
947 	case PL330_STATE_WFP:
948 	case PL330_STATE_QUEUEBUSY:
949 	case PL330_STATE_ATBARRIER:
950 	case PL330_STATE_UPDTPC:
951 	case PL330_STATE_CACHEMISS:
952 	case PL330_STATE_EXECUTING:
953 		return true;
954 
955 	case PL330_STATE_WFE: /* For RESUME, nothing yet */
956 	default:
957 		return false;
958 	}
959 }
960 
961 static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
962 		const struct _xfer_spec *pxs, int cyc)
963 {
964 	int off = 0;
965 
966 	while (cyc--) {
967 		off += _emit_LD(dry_run, &buf[off], ALWAYS);
968 		off += _emit_RMB(dry_run, &buf[off]);
969 		off += _emit_ST(dry_run, &buf[off], ALWAYS);
970 		off += _emit_WMB(dry_run, &buf[off]);
971 	}
972 
973 	return off;
974 }
975 
976 static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
977 		const struct _xfer_spec *pxs, int cyc)
978 {
979 	int off = 0;
980 
981 	while (cyc--) {
982 		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
983 		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
984 		off += _emit_ST(dry_run, &buf[off], ALWAYS);
985 		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
986 	}
987 
988 	return off;
989 }
990 
991 static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
992 		const struct _xfer_spec *pxs, int cyc)
993 {
994 	int off = 0;
995 
996 	while (cyc--) {
997 		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
998 		off += _emit_LD(dry_run, &buf[off], ALWAYS);
999 		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1000 		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1001 	}
1002 
1003 	return off;
1004 }
1005 
1006 static int _bursts(unsigned dry_run, u8 buf[],
1007 		const struct _xfer_spec *pxs, int cyc)
1008 {
1009 	int off = 0;
1010 
1011 	switch (pxs->r->rqtype) {
1012 	case MEMTODEV:
1013 		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
1014 		break;
1015 	case DEVTOMEM:
1016 		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
1017 		break;
1018 	case MEMTOMEM:
1019 		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
1020 		break;
1021 	default:
1022 		off += 0x40000000; /* Scare off the Client */
1023 		break;
1024 	}
1025 
1026 	return off;
1027 }
1028 
1029 /* Returns bytes consumed and updates bursts */
1030 static inline int _loop(unsigned dry_run, u8 buf[],
1031 		unsigned long *bursts, const struct _xfer_spec *pxs)
1032 {
1033 	int cyc, cycmax, szlp, szlpend, szbrst, off;
1034 	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
1035 	struct _arg_LPEND lpend;
1036 
1037 	/* Max iterations possible in DMALP is 256 */
1038 	if (*bursts >= 256*256) {
1039 		lcnt1 = 256;
1040 		lcnt0 = 256;
1041 		cyc = *bursts / lcnt1 / lcnt0;
1042 	} else if (*bursts > 256) {
1043 		lcnt1 = 256;
1044 		lcnt0 = *bursts / lcnt1;
1045 		cyc = 1;
1046 	} else {
1047 		lcnt1 = *bursts;
1048 		lcnt0 = 0;
1049 		cyc = 1;
1050 	}
1051 
1052 	szlp = _emit_LP(1, buf, 0, 0);
1053 	szbrst = _bursts(1, buf, pxs, 1);
1054 
1055 	lpend.cond = ALWAYS;
1056 	lpend.forever = false;
1057 	lpend.loop = 0;
1058 	lpend.bjump = 0;
1059 	szlpend = _emit_LPEND(1, buf, &lpend);
1060 
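	/* When an outer loop (lcnt0) is needed, both DMALP and DMALPEND are emitted twice */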
1061 	if (lcnt0) {
1062 		szlp *= 2;
1063 		szlpend *= 2;
1064 	}
1065 
1066 	/*
1067 	 * Max bursts that we can unroll due to limit on the
1068 	 * size of backward jump that can be encoded in DMALPEND
1069 	 * which is 8-bits and hence 255
1070 	 */
1071 	cycmax = (255 - (szlp + szlpend)) / szbrst;
1072 
1073 	cyc = (cycmax < cyc) ? cycmax : cyc;
1074 
1075 	off = 0;
1076 
1077 	if (lcnt0) {
1078 		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
1079 		ljmp0 = off;
1080 	}
1081 
1082 	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
1083 	ljmp1 = off;
1084 
1085 	off += _bursts(dry_run, &buf[off], pxs, cyc);
1086 
1087 	lpend.cond = ALWAYS;
1088 	lpend.forever = false;
1089 	lpend.loop = 1;
1090 	lpend.bjump = off - ljmp1;
1091 	off += _emit_LPEND(dry_run, &buf[off], &lpend);
1092 
1093 	if (lcnt0) {
1094 		lpend.cond = ALWAYS;
1095 		lpend.forever = false;
1096 		lpend.loop = 0;
1097 		lpend.bjump = off - ljmp0;
1098 		off += _emit_LPEND(dry_run, &buf[off], &lpend);
1099 	}
1100 
1101 	*bursts = lcnt1 * cyc;
1102 	if (lcnt0)
1103 		*bursts *= lcnt0;
1104 
1105 	return off;
1106 }
1107 
1108 static inline int _setup_loops(unsigned dry_run, u8 buf[],
1109 		const struct _xfer_spec *pxs)
1110 {
1111 	struct pl330_xfer *x = pxs->x;
1112 	u32 ccr = pxs->ccr;
1113 	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
1114 	int off = 0;
1115 
1116 	while (bursts) {
1117 		c = bursts;
1118 		off += _loop(dry_run, &buf[off], &c, pxs);
1119 		bursts -= c;
1120 	}
1121 
1122 	return off;
1123 }
1124 
1125 static inline int _setup_xfer(unsigned dry_run, u8 buf[],
1126 		const struct _xfer_spec *pxs)
1127 {
1128 	struct pl330_xfer *x = pxs->x;
1129 	int off = 0;
1130 
1131 	/* DMAMOV SAR, x->src_addr */
1132 	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
1133 	/* DMAMOV DAR, x->dst_addr */
1134 	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
1135 
1136 	/* Setup Loop(s) */
1137 	off += _setup_loops(dry_run, &buf[off], pxs);
1138 
1139 	return off;
1140 }
1141 
1142 /*
1143  * A req is a sequence of one or more xfer units.
1144  * Returns the number of bytes taken to setup the MC for the req.
1145  */
1146 static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
1147 		unsigned index, struct _xfer_spec *pxs)
1148 {
1149 	struct _pl330_req *req = &thrd->req[index];
1150 	struct pl330_xfer *x;
1151 	u8 *buf = req->mc_cpu;
1152 	int off = 0;
1153 
1154 	PL330_DBGMC_START(req->mc_bus);
1155 
1156 	/* DMAMOV CCR, ccr */
1157 	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
1158 
1159 	x = pxs->r->x;
1160 	do {
1161 		/* Error if xfer length is not aligned at burst size */
1162 		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
1163 			return -EINVAL;
1164 
1165 		pxs->x = x;
1166 		off += _setup_xfer(dry_run, &buf[off], pxs);
1167 
1168 		x = x->next;
1169 	} while (x);
1170 
1171 	/* DMASEV peripheral/event */
1172 	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
1173 	/* DMAEND */
1174 	off += _emit_END(dry_run, &buf[off]);
1175 
1176 	return off;
1177 }
1178 
1179 static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1180 {
1181 	u32 ccr = 0;
1182 
1183 	if (rqc->src_inc)
1184 		ccr |= CC_SRCINC;
1185 
1186 	if (rqc->dst_inc)
1187 		ccr |= CC_DSTINC;
1188 
1189 	/* We set same protection levels for Src and DST for now */
1190 	if (rqc->privileged)
1191 		ccr |= CC_SRCPRI | CC_DSTPRI;
1192 	if (rqc->nonsecure)
1193 		ccr |= CC_SRCNS | CC_DSTNS;
1194 	if (rqc->insnaccess)
1195 		ccr |= CC_SRCIA | CC_DSTIA;
1196 
1197 	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
1198 	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
1199 
1200 	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1201 	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1202 
1203 	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
1204 	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1205 
1206 	ccr |= (rqc->swap << CC_SWAP_SHFT);
1207 
1208 	return ccr;
1209 }
1210 
1211 static inline bool _is_valid(u32 ccr)
1212 {
1213 	enum pl330_dstcachectrl dcctl;
1214 	enum pl330_srccachectrl scctl;
1215 
1216 	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DSTCCTRL_MASK;
1217 	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
1218 
1219 	if (dcctl == DINVALID1 || dcctl == DINVALID2
1220 			|| scctl == SINVALID1 || scctl == SINVALID2)
1221 		return false;
1222 	else
1223 		return true;
1224 }
1225 
1226 /*
1227  * Submit a list of xfers after which the client wants notification.
1228  * Client is not notified after each xfer unit, just once after all
1229  * xfer units are done or some error occurs.
1230  */
1231 int pl330_submit_req(void *ch_id, struct pl330_req *r)
1232 {
1233 	struct pl330_thread *thrd = ch_id;
1234 	struct pl330_dmac *pl330;
1235 	struct pl330_info *pi;
1236 	struct _xfer_spec xs;
1237 	unsigned long flags;
1238 	void __iomem *regs;
1239 	unsigned idx;
1240 	u32 ccr;
1241 	int ret = 0;
1242 
1243 	/* No Req or Unacquired Channel or DMAC */
1244 	if (!r || !thrd || thrd->free)
1245 		return -EINVAL;
1246 
1247 	pl330 = thrd->dmac;
1248 	pi = pl330->pinfo;
1249 	regs = pi->base;
1250 
1251 	if (pl330->state == DYING
1252 		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
1253 		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
1254 			__func__, __LINE__);
1255 		return -EAGAIN;
1256 	}
1257 
1258 	/* If request for non-existing peripheral */
1259 	if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
1260 		dev_info(thrd->dmac->pinfo->dev,
1261 				"%s:%d Invalid peripheral(%u)!\n",
1262 				__func__, __LINE__, r->peri);
1263 		return -EINVAL;
1264 	}
1265 
1266 	spin_lock_irqsave(&pl330->lock, flags);
1267 
1268 	if (_queue_full(thrd)) {
1269 		ret = -EAGAIN;
1270 		goto xfer_exit;
1271 	}
1272 
1273 	/* Prefer Secure Channel */
1274 	if (r->cfg)
1275 		r->cfg->nonsecure = _manager_ns(thrd) ? 1 : 0;
1278 
1279 	/* Use last settings, if not provided */
1280 	if (r->cfg)
1281 		ccr = _prepare_ccr(r->cfg);
1282 	else
1283 		ccr = readl(regs + CC(thrd->id));
1284 
1285 	/* If this req doesn't have valid xfer settings */
1286 	if (!_is_valid(ccr)) {
1287 		ret = -EINVAL;
1288 		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
1289 			__func__, __LINE__, ccr);
1290 		goto xfer_exit;
1291 	}
1292 
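	/* Use whichever of the two request slots is currently free */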
1293 	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
1294 
1295 	xs.ccr = ccr;
1296 	xs.r = r;
1297 
1298 	/* First dry run to check if req is acceptable */
1299 	ret = _setup_req(1, thrd, idx, &xs);
1300 	if (ret < 0)
1301 		goto xfer_exit;
1302 
1303 	if (ret > pi->mcbufsz / 2) {
1304 		dev_info(thrd->dmac->pinfo->dev,
1305 			"%s:%d Trying increasing mcbufsz\n",
1306 				__func__, __LINE__);
1307 		ret = -ENOMEM;
1308 		goto xfer_exit;
1309 	}
1310 
1311 	/* Hook the request */
1312 	thrd->lstenq = idx;
1313 	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
1314 	thrd->req[idx].r = r;
1315 
1316 	ret = 0;
1317 
1318 xfer_exit:
1319 	spin_unlock_irqrestore(&pl330->lock, flags);
1320 
1321 	return ret;
1322 }
1323 EXPORT_SYMBOL(pl330_submit_req);
1324 
1325 static void pl330_dotask(unsigned long data)
1326 {
1327 	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
1328 	struct pl330_info *pi = pl330->pinfo;
1329 	unsigned long flags;
1330 	int i;
1331 
1332 	spin_lock_irqsave(&pl330->lock, flags);
1333 
1334 	/* The DMAC itself has gone nuts */
1335 	if (pl330->dmac_tbd.reset_dmac) {
1336 		pl330->state = DYING;
1337 		/* Reset the manager too */
1338 		pl330->dmac_tbd.reset_mngr = true;
1339 		/* Clear the reset flag */
1340 		pl330->dmac_tbd.reset_dmac = false;
1341 	}
1342 
1343 	if (pl330->dmac_tbd.reset_mngr) {
1344 		_stop(pl330->manager);
1345 		/* Reset all channels */
1346 		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
1347 		/* Clear the reset flag */
1348 		pl330->dmac_tbd.reset_mngr = false;
1349 	}
1350 
1351 	for (i = 0; i < pi->pcfg.num_chan; i++) {
1352 
1353 		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
1354 			struct pl330_thread *thrd = &pl330->channels[i];
1355 			void __iomem *regs = pi->base;
1356 			enum pl330_op_err err;
1357 
1358 			_stop(thrd);
1359 
1360 			if (readl(regs + FSC) & (1 << thrd->id))
1361 				err = PL330_ERR_FAIL;
1362 			else
1363 				err = PL330_ERR_ABORT;
1364 
1365 			spin_unlock_irqrestore(&pl330->lock, flags);
1366 
1367 			_callback(thrd->req[1 - thrd->lstenq].r, err);
1368 			_callback(thrd->req[thrd->lstenq].r, err);
1369 
1370 			spin_lock_irqsave(&pl330->lock, flags);
1371 
1372 			thrd->req[0].r = NULL;
1373 			thrd->req[1].r = NULL;
1374 			mark_free(thrd, 0);
1375 			mark_free(thrd, 1);
1376 
1377 			/* Clear the reset flag */
1378 			pl330->dmac_tbd.reset_chan &= ~(1 << i);
1379 		}
1380 	}
1381 
1382 	spin_unlock_irqrestore(&pl330->lock, flags);
1383 
1384 	return;
1385 }
1386 
1387 /* Returns 1 if state was updated, 0 otherwise */
1388 int pl330_update(const struct pl330_info *pi)
1389 {
1390 	struct _pl330_req *rqdone;
1391 	struct pl330_dmac *pl330;
1392 	unsigned long flags;
1393 	void __iomem *regs;
1394 	u32 val;
1395 	int id, ev, ret = 0;
1396 
1397 	if (!pi || !pi->pl330_data)
1398 		return 0;
1399 
1400 	regs = pi->base;
1401 	pl330 = pi->pl330_data;
1402 
1403 	spin_lock_irqsave(&pl330->lock, flags);
1404 
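	/* If the manager thread is faulting, schedule a manager reset via the tasklet */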
1405 	val = readl(regs + FSM) & 0x1;
1406 	if (val)
1407 		pl330->dmac_tbd.reset_mngr = true;
1408 	else
1409 		pl330->dmac_tbd.reset_mngr = false;
1410 
1411 	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
1412 	pl330->dmac_tbd.reset_chan |= val;
1413 	if (val) {
1414 		int i = 0;
1415 		while (i < pi->pcfg.num_chan) {
1416 			if (val & (1 << i)) {
1417 				dev_info(pi->dev,
1418 					"Reset Channel-%d\t CS-%x FTC-%x\n",
1419 						i, readl(regs + CS(i)),
1420 						readl(regs + FTC(i)));
1421 				_stop(&pl330->channels[i]);
1422 			}
1423 			i++;
1424 		}
1425 	}
1426 
1427 	/* Check which event happened i.e, thread notified */
1428 	val = readl(regs + ES);
1429 	if (pi->pcfg.num_events < 32
1430 			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
1431 		pl330->dmac_tbd.reset_dmac = true;
1432 		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
1433 		ret = 1;
1434 		goto updt_exit;
1435 	}
1436 
1437 	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
1438 		if (val & (1 << ev)) { /* Event occurred */
1439 			struct pl330_thread *thrd;
1440 			u32 inten = readl(regs + INTEN);
1441 			int active;
1442 
1443 			/* Clear the event */
1444 			if (inten & (1 << ev))
1445 				writel(1 << ev, regs + INTCLR);
1446 
1447 			ret = 1;
1448 
1449 			id = pl330->events[ev];
1450 
1451 			thrd = &pl330->channels[id];
1452 
1453 			active = thrd->req_running;
1454 			if (active == -1) /* Aborted */
1455 				continue;
1456 
1457 			rqdone = &thrd->req[active];
1458 			mark_free(thrd, active);
1459 
1460 			/* Get going again ASAP */
1461 			_start(thrd);
1462 
1463 			/* For now, just make a list of callbacks to be done */
1464 			list_add_tail(&rqdone->rqd, &pl330->req_done);
1465 		}
1466 	}
1467 
1468 	/* Now that we are in no hurry, do the callbacks */
1469 	while (!list_empty(&pl330->req_done)) {
1470 		struct pl330_req *r;
1471 
1472 		rqdone = container_of(pl330->req_done.next,
1473 					struct _pl330_req, rqd);
1474 
1475 		list_del_init(&rqdone->rqd);
1476 
1477 		/* Detach the req */
1478 		r = rqdone->r;
1479 		rqdone->r = NULL;
1480 
1481 		spin_unlock_irqrestore(&pl330->lock, flags);
1482 		_callback(r, PL330_ERR_NONE);
1483 		spin_lock_irqsave(&pl330->lock, flags);
1484 	}
1485 
1486 updt_exit:
1487 	spin_unlock_irqrestore(&pl330->lock, flags);
1488 
1489 	if (pl330->dmac_tbd.reset_dmac
1490 			|| pl330->dmac_tbd.reset_mngr
1491 			|| pl330->dmac_tbd.reset_chan) {
1492 		ret = 1;
1493 		tasklet_schedule(&pl330->tasks);
1494 	}
1495 
1496 	return ret;
1497 }
1498 EXPORT_SYMBOL(pl330_update);
1499 
1500 int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
1501 {
1502 	struct pl330_thread *thrd = ch_id;
1503 	struct pl330_dmac *pl330;
1504 	unsigned long flags;
1505 	int ret = 0, active;
1506 
1507 	if (!thrd || thrd->free || thrd->dmac->state == DYING)
1508 		return -EINVAL;
1509 
1510 	pl330 = thrd->dmac;
1511 	active = thrd->req_running;
1512 
1513 	spin_lock_irqsave(&pl330->lock, flags);
1514 
1515 	switch (op) {
1516 	case PL330_OP_FLUSH:
1517 		/* Make sure the channel is stopped */
1518 		_stop(thrd);
1519 
1520 		thrd->req[0].r = NULL;
1521 		thrd->req[1].r = NULL;
1522 		mark_free(thrd, 0);
1523 		mark_free(thrd, 1);
1524 		break;
1525 
1526 	case PL330_OP_ABORT:
1527 		/* Make sure the channel is stopped */
1528 		_stop(thrd);
1529 
1530 		/* ABORT is only for the active req */
1531 		if (active == -1)
1532 			break;
1533 
1534 		thrd->req[active].r = NULL;
1535 		mark_free(thrd, active);
1536 
1537 		/* Start the next */
1538 	case PL330_OP_START:
1539 		if ((active == -1) && !_start(thrd))
1540 			ret = -EIO;
1541 		break;
1542 
1543 	default:
1544 		ret = -EINVAL;
1545 	}
1546 
1547 	spin_unlock_irqrestore(&pl330->lock, flags);
1548 	return ret;
1549 }
1550 EXPORT_SYMBOL(pl330_chan_ctrl);
1551 
1552 int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
1553 {
1554 	struct pl330_thread *thrd = ch_id;
1555 	struct pl330_dmac *pl330;
1556 	struct pl330_info *pi;
1557 	void __iomem *regs;
1558 	int active;
1559 	u32 val;
1560 
1561 	if (!pstatus || !thrd || thrd->free)
1562 		return -EINVAL;
1563 
1564 	pl330 = thrd->dmac;
1565 	pi = pl330->pinfo;
1566 	regs = pi->base;
1567 
1568 	/* The client should remove the DMAC and add it again */
1569 	if (pl330->state == DYING)
1570 		pstatus->dmac_halted = true;
1571 	else
1572 		pstatus->dmac_halted = false;
1573 
1574 	val = readl(regs + FSC);
1575 	if (val & (1 << thrd->id))
1576 		pstatus->faulting = true;
1577 	else
1578 		pstatus->faulting = false;
1579 
1580 	active = thrd->req_running;
1581 
1582 	if (active == -1) {
1583 		/* Indicate that the thread is not running */
1584 		pstatus->top_req = NULL;
1585 		pstatus->wait_req = NULL;
1586 	} else {
1587 		pstatus->top_req = thrd->req[active].r;
1588 		pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
1589 					? thrd->req[1 - active].r : NULL;
1590 	}
1591 
1592 	pstatus->src_addr = readl(regs + SA(thrd->id));
1593 	pstatus->dst_addr = readl(regs + DA(thrd->id));
1594 
1595 	return 0;
1596 }
1597 EXPORT_SYMBOL(pl330_chan_status);
1598 
1599 /* Reserve an event */
1600 static inline int _alloc_event(struct pl330_thread *thrd)
1601 {
1602 	struct pl330_dmac *pl330 = thrd->dmac;
1603 	struct pl330_info *pi = pl330->pinfo;
1604 	int ev;
1605 
1606 	for (ev = 0; ev < pi->pcfg.num_events; ev++)
1607 		if (pl330->events[ev] == -1) {
1608 			pl330->events[ev] = thrd->id;
1609 			return ev;
1610 		}
1611 
1612 	return -1;
1613 }
1614 
1615 static bool _chan_ns(const struct pl330_info *pi, int i)
1616 {
1617 	return pi->pcfg.irq_ns & (1 << i);
1618 }
1619 
1620 /* Upon success, returns IdentityToken for the
1621  * allocated channel, NULL otherwise.
1622  */
1623 void *pl330_request_channel(const struct pl330_info *pi)
1624 {
1625 	struct pl330_thread *thrd = NULL;
1626 	struct pl330_dmac *pl330;
1627 	unsigned long flags;
1628 	int chans, i;
1629 
1630 	if (!pi || !pi->pl330_data)
1631 		return NULL;
1632 
1633 	pl330 = pi->pl330_data;
1634 
1635 	if (pl330->state == DYING)
1636 		return NULL;
1637 
1638 	chans = pi->pcfg.num_chan;
1639 
1640 	spin_lock_irqsave(&pl330->lock, flags);
1641 
1642 	for (i = 0; i < chans; i++) {
1643 		thrd = &pl330->channels[i];
1644 		if ((thrd->free) && (!_manager_ns(thrd) ||
1645 					_chan_ns(pi, i))) {
1646 			thrd->ev = _alloc_event(thrd);
1647 			if (thrd->ev >= 0) {
1648 				thrd->free = false;
1649 				thrd->lstenq = 1;
1650 				thrd->req[0].r = NULL;
1651 				mark_free(thrd, 0);
1652 				thrd->req[1].r = NULL;
1653 				mark_free(thrd, 1);
1654 				break;
1655 			}
1656 		}
1657 		thrd = NULL;
1658 	}
1659 
1660 	spin_unlock_irqrestore(&pl330->lock, flags);
1661 
1662 	return thrd;
1663 }
1664 EXPORT_SYMBOL(pl330_request_channel);
1665 
1666 /* Release an event */
1667 static inline void _free_event(struct pl330_thread *thrd, int ev)
1668 {
1669 	struct pl330_dmac *pl330 = thrd->dmac;
1670 	struct pl330_info *pi = pl330->pinfo;
1671 
1672 	/* If the event is valid and was held by the thread */
1673 	if (ev >= 0 && ev < pi->pcfg.num_events
1674 			&& pl330->events[ev] == thrd->id)
1675 		pl330->events[ev] = -1;
1676 }
1677 
1678 void pl330_release_channel(void *ch_id)
1679 {
1680 	struct pl330_thread *thrd = ch_id;
1681 	struct pl330_dmac *pl330;
1682 	unsigned long flags;
1683 
1684 	if (!thrd || thrd->free)
1685 		return;
1686 
1687 	_stop(thrd);
1688 
1689 	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
1690 	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
1691 
1692 	pl330 = thrd->dmac;
1693 
1694 	spin_lock_irqsave(&pl330->lock, flags);
1695 	_free_event(thrd, thrd->ev);
1696 	thrd->free = true;
1697 	spin_unlock_irqrestore(&pl330->lock, flags);
1698 }
1699 EXPORT_SYMBOL(pl330_release_channel);
1700 
1701 /* Initialize the PL330 configuration structure, which the client
1702  * driver can use to make best use of the DMAC.
1703  */
1704 static void read_dmac_config(struct pl330_info *pi)
1705 {
1706 	void __iomem *regs = pi->base;
1707 	u32 val;
1708 
1709 	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
1710 	val &= CRD_DATA_WIDTH_MASK;
1711 	pi->pcfg.data_bus_width = 8 * (1 << val);
1712 
1713 	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
1714 	val &= CRD_DATA_BUFF_MASK;
1715 	pi->pcfg.data_buf_dep = val + 1;
1716 
1717 	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
1718 	val &= CR0_NUM_CHANS_MASK;
1719 	val += 1;
1720 	pi->pcfg.num_chan = val;
1721 
1722 	val = readl(regs + CR0);
1723 	if (val & CR0_PERIPH_REQ_SET) {
1724 		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
1725 		val += 1;
1726 		pi->pcfg.num_peri = val;
1727 		pi->pcfg.peri_ns = readl(regs + CR4);
1728 	} else {
1729 		pi->pcfg.num_peri = 0;
1730 	}
1731 
1732 	val = readl(regs + CR0);
1733 	if (val & CR0_BOOT_MAN_NS)
1734 		pi->pcfg.mode |= DMAC_MODE_NS;
1735 	else
1736 		pi->pcfg.mode &= ~DMAC_MODE_NS;
1737 
1738 	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
1739 	val &= CR0_NUM_EVENTS_MASK;
1740 	val += 1;
1741 	pi->pcfg.num_events = val;
1742 
1743 	pi->pcfg.irq_ns = readl(regs + CR3);
1744 
1745 	pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
1746 	pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
1747 }
1748 
1749 static inline void _reset_thread(struct pl330_thread *thrd)
1750 {
1751 	struct pl330_dmac *pl330 = thrd->dmac;
1752 	struct pl330_info *pi = pl330->pinfo;
1753 
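	/* Each channel owns 'mcbufsz' bytes of microcode buffer, split equally between its two request slots */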
1754 	thrd->req[0].mc_cpu = pl330->mcode_cpu
1755 				+ (thrd->id * pi->mcbufsz);
1756 	thrd->req[0].mc_bus = pl330->mcode_bus
1757 				+ (thrd->id * pi->mcbufsz);
1758 	thrd->req[0].r = NULL;
1759 	mark_free(thrd, 0);
1760 
1761 	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
1762 				+ pi->mcbufsz / 2;
1763 	thrd->req[1].mc_bus = thrd->req[0].mc_bus
1764 				+ pi->mcbufsz / 2;
1765 	thrd->req[1].r = NULL;
1766 	mark_free(thrd, 1);
1767 }
1768 
1769 static int dmac_alloc_threads(struct pl330_dmac *pl330)
1770 {
1771 	struct pl330_info *pi = pl330->pinfo;
1772 	int chans = pi->pcfg.num_chan;
1773 	struct pl330_thread *thrd;
1774 	int i;
1775 
1776 	/* Allocate 1 Manager and 'chans' Channel threads */
1777 	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
1778 					GFP_KERNEL);
1779 	if (!pl330->channels)
1780 		return -ENOMEM;
1781 
1782 	/* Init Channel threads */
1783 	for (i = 0; i < chans; i++) {
1784 		thrd = &pl330->channels[i];
1785 		thrd->id = i;
1786 		thrd->dmac = pl330;
1787 		_reset_thread(thrd);
1788 		thrd->free = true;
1789 	}
1790 
1791 	/* MANAGER is indexed at the end */
1792 	thrd = &pl330->channels[chans];
1793 	thrd->id = chans;
1794 	thrd->dmac = pl330;
1795 	thrd->free = false;
1796 	pl330->manager = thrd;
1797 
1798 	return 0;
1799 }
1800 
1801 static int dmac_alloc_resources(struct pl330_dmac *pl330)
1802 {
1803 	struct pl330_info *pi = pl330->pinfo;
1804 	int chans = pi->pcfg.num_chan;
1805 	int ret;
1806 
1807 	/*
1808 	 * Alloc MicroCode buffer for 'chans' Channel threads.
1809 	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
1810 	 */
1811 	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
1812 				chans * pi->mcbufsz,
1813 				&pl330->mcode_bus, GFP_KERNEL);
1814 	if (!pl330->mcode_cpu) {
1815 		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
1816 			__func__, __LINE__);
1817 		return -ENOMEM;
1818 	}
1819 
1820 	ret = dmac_alloc_threads(pl330);
1821 	if (ret) {
1822 		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
1823 			__func__, __LINE__);
1824 		dma_free_coherent(pi->dev,
1825 				chans * pi->mcbufsz,
1826 				pl330->mcode_cpu, pl330->mcode_bus);
1827 		return ret;
1828 	}
1829 
1830 	return 0;
1831 }
1832 
1833 int pl330_add(struct pl330_info *pi)
1834 {
1835 	struct pl330_dmac *pl330;
1836 	void __iomem *regs;
1837 	int i, ret;
1838 
1839 	if (!pi || !pi->dev)
1840 		return -EINVAL;
1841 
1842 	/* If already added */
1843 	if (pi->pl330_data)
1844 		return -EINVAL;
1845 
1846 	/*
1847 	 * If the SoC can perform reset on the DMAC, then do it
1848 	 * before reading its configuration.
1849 	 */
1850 	if (pi->dmac_reset)
1851 		pi->dmac_reset(pi);
1852 
1853 	regs = pi->base;
1854 
1855 	/* Check if we can handle this DMAC */
1856 	if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
1857 	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
1858 		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
1859 			get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
1860 		return -EINVAL;
1861 	}
1862 
1863 	/* Read the configuration of the DMAC */
1864 	read_dmac_config(pi);
1865 
1866 	if (pi->pcfg.num_events == 0) {
1867 		dev_err(pi->dev, "%s:%d Can't work without events!\n",
1868 			__func__, __LINE__);
1869 		return -EINVAL;
1870 	}
1871 
1872 	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
1873 	if (!pl330) {
1874 		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
1875 			__func__, __LINE__);
1876 		return -ENOMEM;
1877 	}
1878 
1879 	/* Assign the info structure and private data */
1880 	pl330->pinfo = pi;
1881 	pi->pl330_data = pl330;
1882 
1883 	spin_lock_init(&pl330->lock);
1884 
1885 	INIT_LIST_HEAD(&pl330->req_done);
1886 
1887 	/* Use default MC buffer size if not provided */
1888 	if (!pi->mcbufsz)
1889 		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
1890 
1891 	/* Mark all events as free */
1892 	for (i = 0; i < pi->pcfg.num_events; i++)
1893 		pl330->events[i] = -1;
1894 
1895 	/* Allocate resources needed by the DMAC */
1896 	ret = dmac_alloc_resources(pl330);
1897 	if (ret) {
1898 		dev_err(pi->dev, "Unable to create channels for DMAC\n");
1899 		kfree(pl330);
1900 		return ret;
1901 	}
1902 
1903 	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
1904 
1905 	pl330->state = INIT;
1906 
1907 	return 0;
1908 }
1909 EXPORT_SYMBOL(pl330_add);
1910 
1911 static int dmac_free_threads(struct pl330_dmac *pl330)
1912 {
1913 	struct pl330_info *pi = pl330->pinfo;
1914 	int chans = pi->pcfg.num_chan;
1915 	struct pl330_thread *thrd;
1916 	int i;
1917 
1918 	/* Release Channel threads */
1919 	for (i = 0; i < chans; i++) {
1920 		thrd = &pl330->channels[i];
1921 		pl330_release_channel((void *)thrd);
1922 	}
1923 
1924 	/* Free memory */
1925 	kfree(pl330->channels);
1926 
1927 	return 0;
1928 }
1929 
1930 static void dmac_free_resources(struct pl330_dmac *pl330)
1931 {
1932 	struct pl330_info *pi = pl330->pinfo;
1933 	int chans = pi->pcfg.num_chan;
1934 
1935 	dmac_free_threads(pl330);
1936 
1937 	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
1938 				pl330->mcode_cpu, pl330->mcode_bus);
1939 }
1940 
1941 void pl330_del(struct pl330_info *pi)
1942 {
1943 	struct pl330_dmac *pl330;
1944 
1945 	if (!pi || !pi->pl330_data)
1946 		return;
1947 
1948 	pl330 = pi->pl330_data;
1949 
1950 	pl330->state = UNINIT;
1951 
1952 	tasklet_kill(&pl330->tasks);
1953 
1954 	/* Free DMAC resources */
1955 	dmac_free_resources(pl330);
1956 
1957 	kfree(pl330);
1958 	pi->pl330_data = NULL;
1959 }
1960 EXPORT_SYMBOL(pl330_del);
1961