1 /*
2  * R8A66597 UDC (USB gadget)
3  *
4  * Copyright (C) 2006-2009 Renesas Solutions Corp.
5  *
6  * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; version 2 of the License.
11  */
12 
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
15 #include <linux/delay.h>
16 #include <linux/io.h>
17 #include <linux/platform_device.h>
18 #include <linux/clk.h>
19 #include <linux/err.h>
20 #include <linux/slab.h>
21 #include <linux/dma-mapping.h>
22 
23 #include <linux/usb/ch9.h>
24 #include <linux/usb/gadget.h>
25 
26 #include "r8a66597-udc.h"
27 
28 #define DRIVER_VERSION	"2011-09-26"
29 
30 static const char udc_name[] = "r8a66597_udc";
31 static const char *r8a66597_ep_name[] = {
32 	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
33 	"ep8", "ep9",
34 };
35 
36 static void init_controller(struct r8a66597 *r8a66597);
37 static void disable_controller(struct r8a66597 *r8a66597);
38 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
39 static void irq_packet_write(struct r8a66597_ep *ep,
40 				struct r8a66597_request *req);
41 static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
42 			gfp_t gfp_flags);
43 
44 static void transfer_complete(struct r8a66597_ep *ep,
45 		struct r8a66597_request *req, int status);
46 
47 /*-------------------------------------------------------------------------*/
48 static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
49 {
50 	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
51 }
52 
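/*
 * Changing a per-pipe interrupt enable can race with the global enables, so
 * temporarily clear BEMPE/NRDYE/BRDYE in INTENB0, update the per-pipe bit,
 * then restore the saved INTENB0 value.
 */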
53 static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
54 		unsigned long reg)
55 {
56 	u16 tmp;
57 
58 	tmp = r8a66597_read(r8a66597, INTENB0);
59 	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
60 			INTENB0);
61 	r8a66597_bset(r8a66597, (1 << pipenum), reg);
62 	r8a66597_write(r8a66597, tmp, INTENB0);
63 }
64 
65 static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
66 		unsigned long reg)
67 {
68 	u16 tmp;
69 
70 	tmp = r8a66597_read(r8a66597, INTENB0);
71 	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
72 			INTENB0);
73 	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
74 	r8a66597_write(r8a66597, tmp, INTENB0);
75 }
76 
77 static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
78 {
79 	r8a66597_bset(r8a66597, CTRE, INTENB0);
80 	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);
81 
82 	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
83 }
84 
85 static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
86 __releases(r8a66597->lock)
87 __acquires(r8a66597->lock)
88 {
89 	r8a66597_bclr(r8a66597, CTRE, INTENB0);
90 	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
91 	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
92 
93 	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
94 	spin_unlock(&r8a66597->lock);
95 	r8a66597->driver->disconnect(&r8a66597->gadget);
96 	spin_lock(&r8a66597->lock);
97 
98 	disable_controller(r8a66597);
99 	init_controller(r8a66597);
100 	r8a66597_bset(r8a66597, VBSE, INTENB0);
101 	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
102 }
103 
104 static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
105 {
106 	u16 pid = 0;
107 	unsigned long offset;
108 
109 	if (pipenum == 0) {
110 		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
111 	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
112 		offset = get_pipectr_addr(pipenum);
113 		pid = r8a66597_read(r8a66597, offset) & PID;
114 	} else {
115 		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
116 			pipenum);
117 	}
118 
119 	return pid;
120 }
121 
122 static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
123 		u16 pid)
124 {
125 	unsigned long offset;
126 
127 	if (pipenum == 0) {
128 		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
129 	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
130 		offset = get_pipectr_addr(pipenum);
131 		r8a66597_mdfy(r8a66597, pid, PID, offset);
132 	} else {
133 		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
134 			pipenum);
135 	}
136 }
137 
138 static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
139 {
140 	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
141 }
142 
143 static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
144 {
145 	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
146 }
147 
148 static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
149 {
150 	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
151 }
152 
153 static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
154 {
155 	u16 ret = 0;
156 	unsigned long offset;
157 
158 	if (pipenum == 0) {
159 		ret = r8a66597_read(r8a66597, DCPCTR);
160 	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
161 		offset = get_pipectr_addr(pipenum);
162 		ret = r8a66597_read(r8a66597, offset);
163 	} else {
164 		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
165 			pipenum);
166 	}
167 
168 	return ret;
169 }
170 
171 static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
172 {
173 	unsigned long offset;
174 
175 	pipe_stop(r8a66597, pipenum);
176 
177 	if (pipenum == 0) {
178 		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
179 	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
180 		offset = get_pipectr_addr(pipenum);
181 		r8a66597_bset(r8a66597, SQCLR, offset);
182 	} else {
183 		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
184 			pipenum);
185 	}
186 }
187 
188 static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
189 {
190 	unsigned long offset;
191 
192 	pipe_stop(r8a66597, pipenum);
193 
194 	if (pipenum == 0) {
195 		r8a66597_bset(r8a66597, SQSET, DCPCTR);
196 	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
197 		offset = get_pipectr_addr(pipenum);
198 		r8a66597_bset(r8a66597, SQSET, offset);
199 	} else {
200 		dev_err(r8a66597_to_dev(r8a66597),
201 			"unexpected pipe num (%d)\n", pipenum);
202 	}
203 }
204 
205 static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
206 {
207 	unsigned long offset;
208 
209 	if (pipenum == 0) {
210 		return r8a66597_read(r8a66597, DCPCTR) & SQMON;
211 	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
212 		offset = get_pipectr_addr(pipenum);
213 		return r8a66597_read(r8a66597, offset) & SQMON;
214 	} else {
215 		dev_err(r8a66597_to_dev(r8a66597),
216 			"unexpected pipe num (%d)\n", pipenum);
217 	}
218 
219 	return 0;
220 }
221 
222 static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
223 {
224 	return control_reg_sqmon(r8a66597, pipenum);
225 }
226 
227 static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
228 			       u16 toggle)
229 {
230 	if (toggle)
231 		control_reg_sqset(r8a66597, pipenum);
232 	else
233 		control_reg_sqclr(r8a66597, pipenum);
234 }
235 
236 static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
237 {
238 	u16 tmp;
239 	int size;
240 
241 	if (pipenum == 0) {
242 		tmp = r8a66597_read(r8a66597, DCPCFG);
243 		if ((tmp & R8A66597_CNTMD) != 0)
244 			size = 256;
245 		else {
246 			tmp = r8a66597_read(r8a66597, DCPMAXP);
247 			size = tmp & MAXP;
248 		}
249 	} else {
250 		r8a66597_write(r8a66597, pipenum, PIPESEL);
251 		tmp = r8a66597_read(r8a66597, PIPECFG);
252 		if ((tmp & R8A66597_CNTMD) != 0) {
253 			tmp = r8a66597_read(r8a66597, PIPEBUF);
254 			size = ((tmp >> 10) + 1) * 64;
255 		} else {
256 			tmp = r8a66597_read(r8a66597, PIPEMAXP);
257 			size = tmp & MXPS;
258 		}
259 	}
260 
261 	return size;
262 }
263 
264 static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
265 {
266 	if (r8a66597->pdata->on_chip)
267 		return MBW_32;
268 	else
269 		return MBW_16;
270 }
271 
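/*
 * Point the FIFO port selected by 'fifosel' at 'pipenum' (plus ISEL for the
 * DCP) and poll until the controller actually reflects the new CURPIPE/ISEL
 * value, with a generous timeout.
 */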
272 static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
273 				    u16 isel, u16 fifosel)
274 {
275 	u16 tmp, mask, loop;
276 	int i = 0;
277 
278 	if (!pipenum) {
279 		mask = ISEL | CURPIPE;
280 		loop = isel;
281 	} else {
282 		mask = CURPIPE;
283 		loop = pipenum;
284 	}
285 	r8a66597_mdfy(r8a66597, loop, mask, fifosel);
286 
287 	do {
288 		tmp = r8a66597_read(r8a66597, fifosel);
289 		if (i++ > 1000000) {
290 			dev_err(r8a66597_to_dev(r8a66597),
291 				"r8a66597: register %x, loop %x "
292 				"timed out\n", fifosel, loop);
293 			break;
294 		}
295 		ndelay(1);
296 	} while ((tmp & mask) != loop);
297 }
298 
299 static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
300 {
301 	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
302 
303 	if (ep->use_dma)
304 		r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
305 
306 	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);
307 
308 	ndelay(450);
309 
310 	if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
311 		r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
312 	else
313 		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
314 
315 	if (ep->use_dma)
316 		r8a66597_bset(r8a66597, DREQE, ep->fifosel);
317 }
318 
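/*
 * Select the pipe and program PIPECFG, PIPEBUF, PIPEMAXP and PIPEPERI from
 * 'info'.  FIFO buffer blocks are assigned per transfer type; bulk pipes get
 * double buffering and SHTNAK for the OUT direction.
 */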
319 static int pipe_buffer_setting(struct r8a66597 *r8a66597,
320 		struct r8a66597_pipe_info *info)
321 {
322 	u16 bufnum = 0, buf_bsize = 0;
323 	u16 pipecfg = 0;
324 
325 	if (info->pipe == 0)
326 		return -EINVAL;
327 
328 	r8a66597_write(r8a66597, info->pipe, PIPESEL);
329 
330 	if (info->dir_in)
331 		pipecfg |= R8A66597_DIR;
332 	pipecfg |= info->type;
333 	pipecfg |= info->epnum;
334 	switch (info->type) {
335 	case R8A66597_INT:
336 		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
337 		buf_bsize = 0;
338 		break;
339 	case R8A66597_BULK:
340 		/* isochronous pipes may be used as bulk pipes */
341 		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
342 			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
343 		else
344 			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
345 
346 		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
347 		buf_bsize = 7;
348 		pipecfg |= R8A66597_DBLB;
349 		if (!info->dir_in)
350 			pipecfg |= R8A66597_SHTNAK;
351 		break;
352 	case R8A66597_ISO:
353 		bufnum = R8A66597_BASE_BUFNUM +
354 			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
355 		buf_bsize = 7;
356 		break;
357 	}
358 
359 	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
360 		pr_err("r8a66597 pipe memory is insufficient\n");
361 		return -ENOMEM;
362 	}
363 
364 	r8a66597_write(r8a66597, pipecfg, PIPECFG);
365 	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
366 	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
367 	if (info->interval)
368 		info->interval--;
369 	r8a66597_write(r8a66597, info->interval, PIPEPERI);
370 
371 	return 0;
372 }
373 
374 static void pipe_buffer_release(struct r8a66597 *r8a66597,
375 				struct r8a66597_pipe_info *info)
376 {
377 	if (info->pipe == 0)
378 		return;
379 
380 	if (is_bulk_pipe(info->pipe)) {
381 		r8a66597->bulk--;
382 	} else if (is_interrupt_pipe(info->pipe)) {
383 		r8a66597->interrupt--;
384 	} else if (is_isoc_pipe(info->pipe)) {
385 		r8a66597->isochronous--;
386 		if (info->type == R8A66597_BULK)
387 			r8a66597->bulk--;
388 	} else {
389 		dev_err(r8a66597_to_dev(r8a66597),
390 			"ep_release: unexpected pipenum (%d)\n", info->pipe);
391 	}
392 }
393 
394 static void pipe_initialize(struct r8a66597_ep *ep)
395 {
396 	struct r8a66597 *r8a66597 = ep->r8a66597;
397 
398 	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);
399 
400 	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
401 	r8a66597_write(r8a66597, 0, ep->pipectr);
402 	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
403 	if (ep->use_dma) {
404 		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);
405 
406 		ndelay(450);
407 
408 		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
409 	}
410 }
411 
412 static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
413 				struct r8a66597_ep *ep,
414 				const struct usb_endpoint_descriptor *desc,
415 				u16 pipenum, int dma)
416 {
417 	ep->use_dma = 0;
418 	ep->fifoaddr = CFIFO;
419 	ep->fifosel = CFIFOSEL;
420 	ep->fifoctr = CFIFOCTR;
421 
422 	ep->pipectr = get_pipectr_addr(pipenum);
423 	if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
424 		ep->pipetre = get_pipetre_addr(pipenum);
425 		ep->pipetrn = get_pipetrn_addr(pipenum);
426 	} else {
427 		ep->pipetre = 0;
428 		ep->pipetrn = 0;
429 	}
430 	ep->pipenum = pipenum;
431 	ep->ep.maxpacket = usb_endpoint_maxp(desc);
432 	r8a66597->pipenum2ep[pipenum] = ep;
433 	r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
434 		= ep;
435 	INIT_LIST_HEAD(&ep->queue);
436 }
437 
438 static void r8a66597_ep_release(struct r8a66597_ep *ep)
439 {
440 	struct r8a66597 *r8a66597 = ep->r8a66597;
441 	u16 pipenum = ep->pipenum;
442 
443 	if (pipenum == 0)
444 		return;
445 
446 	if (ep->use_dma)
447 		r8a66597->num_dma--;
448 	ep->pipenum = 0;
449 	ep->busy = 0;
450 	ep->use_dma = 0;
451 }
452 
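/*
 * Allocate a hardware pipe that matches the endpoint descriptor's transfer
 * type (bulk endpoints may fall back to an isochronous pipe), program its
 * buffers and bind it to this endpoint.
 */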
453 static int alloc_pipe_config(struct r8a66597_ep *ep,
454 		const struct usb_endpoint_descriptor *desc)
455 {
456 	struct r8a66597 *r8a66597 = ep->r8a66597;
457 	struct r8a66597_pipe_info info;
458 	int dma = 0;
459 	unsigned char *counter;
460 	int ret;
461 
462 	ep->desc = desc;
463 
464 	if (ep->pipenum)	/* already allocated pipe  */
465 		return 0;
466 
467 	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
468 	case USB_ENDPOINT_XFER_BULK:
469 		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
470 			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
471 				dev_err(r8a66597_to_dev(r8a66597),
472 					"bulk pipe is insufficient\n");
473 				return -ENODEV;
474 			} else {
475 				info.pipe = R8A66597_BASE_PIPENUM_ISOC
476 						+ r8a66597->isochronous;
477 				counter = &r8a66597->isochronous;
478 			}
479 		} else {
480 			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
481 			counter = &r8a66597->bulk;
482 		}
483 		info.type = R8A66597_BULK;
484 		dma = 1;
485 		break;
486 	case USB_ENDPOINT_XFER_INT:
487 		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
488 			dev_err(r8a66597_to_dev(r8a66597),
489 				"interrupt pipe is insufficient\n");
490 			return -ENODEV;
491 		}
492 		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
493 		info.type = R8A66597_INT;
494 		counter = &r8a66597->interrupt;
495 		break;
496 	case USB_ENDPOINT_XFER_ISOC:
497 		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
498 			dev_err(r8a66597_to_dev(r8a66597),
499 				"isochronous pipe is insufficient\n");
500 			return -ENODEV;
501 		}
502 		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
503 		info.type = R8A66597_ISO;
504 		counter = &r8a66597->isochronous;
505 		break;
506 	default:
507 		dev_err(r8a66597_to_dev(r8a66597), "unexpected xfer type\n");
508 		return -EINVAL;
509 	}
510 	ep->type = info.type;
511 
512 	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
513 	info.maxpacket = usb_endpoint_maxp(desc);
514 	info.interval = desc->bInterval;
515 	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
516 		info.dir_in = 1;
517 	else
518 		info.dir_in = 0;
519 
520 	ret = pipe_buffer_setting(r8a66597, &info);
521 	if (ret < 0) {
522 		dev_err(r8a66597_to_dev(r8a66597),
523 			"pipe_buffer_setting fail\n");
524 		return ret;
525 	}
526 
527 	(*counter)++;
528 	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
529 		r8a66597->bulk++;
530 
531 	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
532 	pipe_initialize(ep);
533 
534 	return 0;
535 }
536 
537 static int free_pipe_config(struct r8a66597_ep *ep)
538 {
539 	struct r8a66597 *r8a66597 = ep->r8a66597;
540 	struct r8a66597_pipe_info info;
541 
542 	info.pipe = ep->pipenum;
543 	info.type = ep->type;
544 	pipe_buffer_release(r8a66597, &info);
545 	r8a66597_ep_release(ep);
546 
547 	return 0;
548 }
549 
550 /*-------------------------------------------------------------------------*/
551 static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
552 {
553 	enable_irq_ready(r8a66597, pipenum);
554 	enable_irq_nrdy(r8a66597, pipenum);
555 }
556 
557 static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
558 {
559 	disable_irq_ready(r8a66597, pipenum);
560 	disable_irq_nrdy(r8a66597, pipenum);
561 }
562 
563 /* if ccpl is true, the gadget driver's complete function is not called */
564 static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
565 {
566 	r8a66597->ep[0].internal_ccpl = ccpl;
567 	pipe_start(r8a66597, 0);
568 	r8a66597_bset(r8a66597, CCPL, DCPCTR);
569 }
570 
571 static void start_ep0_write(struct r8a66597_ep *ep,
572 				struct r8a66597_request *req)
573 {
574 	struct r8a66597 *r8a66597 = ep->r8a66597;
575 
576 	pipe_change(r8a66597, ep->pipenum);
577 	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
578 	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
579 	if (req->req.length == 0) {
580 		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
581 		pipe_start(r8a66597, 0);
582 		transfer_complete(ep, req, 0);
583 	} else {
584 		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
585 		irq_ep0_write(ep, req);
586 	}
587 }
588 
589 static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
590 			    u16 fifosel)
591 {
592 	u16 tmp;
593 
594 	tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
595 	if (tmp == pipenum)
596 		r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
597 }
598 
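/*
 * Set or clear the BFRE bit of a pipe.  The pipe is stopped and detached from
 * every FIFO port first, and its data toggle is saved and restored around the
 * ACLRM buffer clear that resets the internal BFRE state.
 */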
599 static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
600 			     int enable)
601 {
602 	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
603 	u16 tmp, toggle;
604 
605 	/* check current BFRE bit */
606 	r8a66597_write(r8a66597, pipenum, PIPESEL);
607 	tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
608 	if ((enable && tmp) || (!enable && !tmp))
609 		return;
610 
611 	/* change BFRE bit */
612 	pipe_stop(r8a66597, pipenum);
613 	disable_fifosel(r8a66597, pipenum, CFIFOSEL);
614 	disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
615 	disable_fifosel(r8a66597, pipenum, D1FIFOSEL);
616 
617 	toggle = save_usb_toggle(r8a66597, pipenum);
618 
619 	r8a66597_write(r8a66597, pipenum, PIPESEL);
620 	if (enable)
621 		r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
622 	else
623 		r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);
624 
625 	/* initialize for internal BFRE flag */
626 	r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
627 	r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);
628 
629 	restore_usb_toggle(r8a66597, pipenum, toggle);
630 }
631 
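/*
 * Try to claim the single SUDMAC channel for this bulk request and map the
 * request buffer for DMA.  A negative return means the caller must fall back
 * to PIO.
 */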
632 static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
633 				struct r8a66597_ep *ep,
634 				struct r8a66597_request *req)
635 {
636 	struct r8a66597_dma *dma;
637 
638 	if (!r8a66597_is_sudmac(r8a66597))
639 		return -ENODEV;
640 
641 	/* Check transfer type */
642 	if (!is_bulk_pipe(ep->pipenum))
643 		return -EIO;
644 
645 	if (r8a66597->dma.used)
646 		return -EBUSY;
647 
648 	/* set SUDMAC parameters */
649 	dma = &r8a66597->dma;
650 	dma->used = 1;
651 	if (ep->desc->bEndpointAddress & USB_DIR_IN) {
652 		dma->dir = 1;
653 	} else {
654 		dma->dir = 0;
655 		change_bfre_mode(r8a66597, ep->pipenum, 1);
656 	}
657 
658 	/* set r8a66597_ep parameters */
659 	ep->use_dma = 1;
660 	ep->dma = dma;
661 	ep->fifoaddr = D0FIFO;
662 	ep->fifosel = D0FIFOSEL;
663 	ep->fifoctr = D0FIFOCTR;
664 
665 	/* dma mapping */
666 	req->req.dma = dma_map_single(r8a66597_to_dev(ep->r8a66597),
667 				req->req.buf, req->req.length,
668 				dma->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
669 
670 	return 0;
671 }
672 
673 static void sudmac_free_channel(struct r8a66597 *r8a66597,
674 				struct r8a66597_ep *ep,
675 				struct r8a66597_request *req)
676 {
677 	if (!r8a66597_is_sudmac(r8a66597))
678 		return;
679 
680 	dma_unmap_single(r8a66597_to_dev(ep->r8a66597),
681 			 req->req.dma, req->req.length,
682 			 ep->dma->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
683 
684 	r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
685 	r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);
686 
687 	ep->dma->used = 0;
688 	ep->use_dma = 0;
689 	ep->fifoaddr = CFIFO;
690 	ep->fifosel = CFIFOSEL;
691 	ep->fifoctr = CFIFOCTR;
692 }
693 
694 static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
695 			 struct r8a66597_request *req)
696 {
697 	BUG_ON(req->req.length == 0);
698 
699 	r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
700 	r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
701 	r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
702 	r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);
703 
704 	r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
705 }
706 
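/* Start an IN (write to host) transfer, using SUDMAC when available. */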
707 static void start_packet_write(struct r8a66597_ep *ep,
708 				struct r8a66597_request *req)
709 {
710 	struct r8a66597 *r8a66597 = ep->r8a66597;
711 	u16 tmp;
712 
713 	pipe_change(r8a66597, ep->pipenum);
714 	disable_irq_empty(r8a66597, ep->pipenum);
715 	pipe_start(r8a66597, ep->pipenum);
716 
717 	if (req->req.length == 0) {
718 		transfer_complete(ep, req, 0);
719 	} else {
720 		r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
721 		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
722 			/* PIO mode */
723 			pipe_change(r8a66597, ep->pipenum);
724 			disable_irq_empty(r8a66597, ep->pipenum);
725 			pipe_start(r8a66597, ep->pipenum);
726 			tmp = r8a66597_read(r8a66597, ep->fifoctr);
727 			if (unlikely((tmp & FRDY) == 0))
728 				pipe_irq_enable(r8a66597, ep->pipenum);
729 			else
730 				irq_packet_write(ep, req);
731 		} else {
732 			/* DMA mode */
733 			pipe_change(r8a66597, ep->pipenum);
734 			disable_irq_nrdy(r8a66597, ep->pipenum);
735 			pipe_start(r8a66597, ep->pipenum);
736 			enable_irq_nrdy(r8a66597, ep->pipenum);
737 			sudmac_start(r8a66597, ep, req);
738 		}
739 	}
740 }
741 
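/* Start an OUT (read from host) transfer; non-control pipes may use SUDMAC. */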
742 static void start_packet_read(struct r8a66597_ep *ep,
743 				struct r8a66597_request *req)
744 {
745 	struct r8a66597 *r8a66597 = ep->r8a66597;
746 	u16 pipenum = ep->pipenum;
747 
748 	if (ep->pipenum == 0) {
749 		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
750 		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
751 		pipe_start(r8a66597, pipenum);
752 		pipe_irq_enable(r8a66597, pipenum);
753 	} else {
754 		pipe_stop(r8a66597, pipenum);
755 		if (ep->pipetre) {
756 			enable_irq_nrdy(r8a66597, pipenum);
757 			r8a66597_write(r8a66597, TRCLR, ep->pipetre);
758 			r8a66597_write(r8a66597,
759 				DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
760 				ep->pipetrn);
761 			r8a66597_bset(r8a66597, TRENB, ep->pipetre);
762 		}
763 
764 		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
765 			/* PIO mode */
766 			change_bfre_mode(r8a66597, ep->pipenum, 0);
767 			pipe_start(r8a66597, pipenum);	/* trigger once */
768 			pipe_irq_enable(r8a66597, pipenum);
769 		} else {
770 			pipe_change(r8a66597, pipenum);
771 			sudmac_start(r8a66597, ep, req);
772 			pipe_start(r8a66597, pipenum);	/* trigger once */
773 		}
774 	}
775 }
776 
777 static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
778 {
779 	if (ep->desc->bEndpointAddress & USB_DIR_IN)
780 		start_packet_write(ep, req);
781 	else
782 		start_packet_read(ep, req);
783 }
784 
785 static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
786 {
787 	u16 ctsq;
788 
789 	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;
790 
791 	switch (ctsq) {
792 	case CS_RDDS:
793 		start_ep0_write(ep, req);
794 		break;
795 	case CS_WRDS:
796 		start_packet_read(ep, req);
797 		break;
798 
799 	case CS_WRND:
800 		control_end(ep->r8a66597, 0);
801 		break;
802 	default:
803 		dev_err(r8a66597_to_dev(ep->r8a66597),
804 			"start_ep0: unexpected ctsq(%x)\n", ctsq);
805 		break;
806 	}
807 }
808 
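/*
 * Bring the controller up: configure bus wait/clock/pin settings (which differ
 * between the on-chip and external variants), enable the USB module and set
 * the interrupt sense.
 */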
809 static void init_controller(struct r8a66597 *r8a66597)
810 {
811 	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
812 	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
813 	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
814 
815 	if (r8a66597->pdata->on_chip) {
816 		if (r8a66597->pdata->buswait)
817 			r8a66597_write(r8a66597, r8a66597->pdata->buswait,
818 					SYSCFG1);
819 		else
820 			r8a66597_write(r8a66597, 0x0f, SYSCFG1);
821 		r8a66597_bset(r8a66597, HSE, SYSCFG0);
822 
823 		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
824 		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
825 		r8a66597_bset(r8a66597, USBE, SYSCFG0);
826 
827 		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
828 
829 		r8a66597_bset(r8a66597, irq_sense, INTENB1);
830 		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
831 				DMA0CFG);
832 	} else {
833 		r8a66597_bset(r8a66597, vif | endian, PINCFG);
834 		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High speed */
835 		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
836 				XTAL, SYSCFG0);
837 
838 		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
839 		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
840 		r8a66597_bset(r8a66597, USBE, SYSCFG0);
841 
842 		r8a66597_bset(r8a66597, XCKE, SYSCFG0);
843 
844 		msleep(3);
845 
846 		r8a66597_bset(r8a66597, PLLC, SYSCFG0);
847 
848 		msleep(1);
849 
850 		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
851 
852 		r8a66597_bset(r8a66597, irq_sense, INTENB1);
853 		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
854 			       DMA0CFG);
855 	}
856 }
857 
858 static void disable_controller(struct r8a66597 *r8a66597)
859 {
860 	if (r8a66597->pdata->on_chip) {
861 		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
862 		r8a66597_bclr(r8a66597, UTST, TESTMODE);
863 
864 		/* disable interrupts */
865 		r8a66597_write(r8a66597, 0, INTENB0);
866 		r8a66597_write(r8a66597, 0, INTENB1);
867 		r8a66597_write(r8a66597, 0, BRDYENB);
868 		r8a66597_write(r8a66597, 0, BEMPENB);
869 		r8a66597_write(r8a66597, 0, NRDYENB);
870 
871 		/* clear status */
872 		r8a66597_write(r8a66597, 0, BRDYSTS);
873 		r8a66597_write(r8a66597, 0, NRDYSTS);
874 		r8a66597_write(r8a66597, 0, BEMPSTS);
875 
876 		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
877 		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
878 
879 	} else {
880 		r8a66597_bclr(r8a66597, UTST, TESTMODE);
881 		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
882 		udelay(1);
883 		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
884 		udelay(1);
885 		udelay(1);
886 		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
887 	}
888 }
889 
890 static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
891 {
892 	u16 tmp;
893 
894 	if (!r8a66597->pdata->on_chip) {
895 		tmp = r8a66597_read(r8a66597, SYSCFG0);
896 		if (!(tmp & XCKE))
897 			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
898 	}
899 }
900 
901 static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
902 {
903 	return list_entry(ep->queue.next, struct r8a66597_request, queue);
904 }
905 
906 /*-------------------------------------------------------------------------*/
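/*
 * Remove 'req' from the queue, record its status and call the gadget driver's
 * completion callback with the controller lock dropped; if more requests are
 * queued, kick off the next one.
 */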
907 static void transfer_complete(struct r8a66597_ep *ep,
908 		struct r8a66597_request *req, int status)
909 __releases(r8a66597->lock)
910 __acquires(r8a66597->lock)
911 {
912 	int restart = 0;
913 
914 	if (unlikely(ep->pipenum == 0)) {
915 		if (ep->internal_ccpl) {
916 			ep->internal_ccpl = 0;
917 			return;
918 		}
919 	}
920 
921 	list_del_init(&req->queue);
922 	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
923 		req->req.status = -ESHUTDOWN;
924 	else
925 		req->req.status = status;
926 
927 	if (!list_empty(&ep->queue))
928 		restart = 1;
929 
930 	if (ep->use_dma)
931 		sudmac_free_channel(ep->r8a66597, ep, req);
932 
933 	spin_unlock(&ep->r8a66597->lock);
934 	req->req.complete(&ep->ep, &req->req);
935 	spin_lock(&ep->r8a66597->lock);
936 
937 	if (restart) {
938 		req = get_request_from_ep(ep);
939 		if (ep->desc)
940 			start_packet(ep, req);
941 	}
942 }
943 
944 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
945 {
946 	int i;
947 	u16 tmp;
948 	unsigned bufsize;
949 	size_t size;
950 	void *buf;
951 	u16 pipenum = ep->pipenum;
952 	struct r8a66597 *r8a66597 = ep->r8a66597;
953 
954 	pipe_change(r8a66597, pipenum);
955 	r8a66597_bset(r8a66597, ISEL, ep->fifosel);
956 
957 	i = 0;
958 	do {
959 		tmp = r8a66597_read(r8a66597, ep->fifoctr);
960 		if (i++ > 100000) {
961 			dev_err(r8a66597_to_dev(r8a66597),
962 				"pipe0 is busy - possible CPU I/O bus "
963 				"conflict; please power off this controller\n");
964 			return;
965 		}
966 		ndelay(1);
967 	} while ((tmp & FRDY) == 0);
968 
969 	/* prepare parameters */
970 	bufsize = get_buffer_size(r8a66597, pipenum);
971 	buf = req->req.buf + req->req.actual;
972 	size = min(bufsize, req->req.length - req->req.actual);
973 
974 	/* write fifo */
975 	if (req->req.buf) {
976 		if (size > 0)
977 			r8a66597_write_fifo(r8a66597, ep, buf, size);
978 		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
979 			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
980 	}
981 
982 	/* update parameters */
983 	req->req.actual += size;
984 
985 	/* check transfer finish */
986 	if ((!req->req.zero && (req->req.actual == req->req.length))
987 			|| (size % ep->ep.maxpacket)
988 			|| (size == 0)) {
989 		disable_irq_ready(r8a66597, pipenum);
990 		disable_irq_empty(r8a66597, pipenum);
991 	} else {
992 		disable_irq_ready(r8a66597, pipenum);
993 		enable_irq_empty(r8a66597, pipenum);
994 	}
995 	pipe_start(r8a66597, pipenum);
996 }
997 
998 static void irq_packet_write(struct r8a66597_ep *ep,
999 				struct r8a66597_request *req)
1000 {
1001 	u16 tmp;
1002 	unsigned bufsize;
1003 	size_t size;
1004 	void *buf;
1005 	u16 pipenum = ep->pipenum;
1006 	struct r8a66597 *r8a66597 = ep->r8a66597;
1007 
1008 	pipe_change(r8a66597, pipenum);
1009 	tmp = r8a66597_read(r8a66597, ep->fifoctr);
1010 	if (unlikely((tmp & FRDY) == 0)) {
1011 		pipe_stop(r8a66597, pipenum);
1012 		pipe_irq_disable(r8a66597, pipenum);
1013 		dev_err(r8a66597_to_dev(r8a66597),
1014 			"write fifo not ready. pipenum=%d\n", pipenum);
1015 		return;
1016 	}
1017 
1018 	/* prepare parameters */
1019 	bufsize = get_buffer_size(r8a66597, pipenum);
1020 	buf = req->req.buf + req->req.actual;
1021 	size = min(bufsize, req->req.length - req->req.actual);
1022 
1023 	/* write fifo */
1024 	if (req->req.buf) {
1025 		r8a66597_write_fifo(r8a66597, ep, buf, size);
1026 		if ((size == 0)
1027 				|| ((size % ep->ep.maxpacket) != 0)
1028 				|| ((bufsize != ep->ep.maxpacket)
1029 					&& (bufsize > size)))
1030 			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
1031 	}
1032 
1033 	/* update parameters */
1034 	req->req.actual += size;
1035 	/* check transfer finish */
1036 	if ((!req->req.zero && (req->req.actual == req->req.length))
1037 			|| (size % ep->ep.maxpacket)
1038 			|| (size == 0)) {
1039 		disable_irq_ready(r8a66597, pipenum);
1040 		enable_irq_empty(r8a66597, pipenum);
1041 	} else {
1042 		disable_irq_empty(r8a66597, pipenum);
1043 		pipe_irq_enable(r8a66597, pipenum);
1044 	}
1045 }
1046 
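/*
 * PIO read: copy data from the FIFO into the current request and detect the
 * end of the transfer (short packet, zero length or full request length).
 */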
1047 static void irq_packet_read(struct r8a66597_ep *ep,
1048 				struct r8a66597_request *req)
1049 {
1050 	u16 tmp;
1051 	int rcv_len, bufsize, req_len;
1052 	int size;
1053 	void *buf;
1054 	u16 pipenum = ep->pipenum;
1055 	struct r8a66597 *r8a66597 = ep->r8a66597;
1056 	int finish = 0;
1057 
1058 	pipe_change(r8a66597, pipenum);
1059 	tmp = r8a66597_read(r8a66597, ep->fifoctr);
1060 	if (unlikely((tmp & FRDY) == 0)) {
1061 		req->req.status = -EPIPE;
1062 		pipe_stop(r8a66597, pipenum);
1063 		pipe_irq_disable(r8a66597, pipenum);
1064 		dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready\n");
1065 		return;
1066 	}
1067 
1068 	/* prepare parameters */
1069 	rcv_len = tmp & DTLN;
1070 	bufsize = get_buffer_size(r8a66597, pipenum);
1071 
1072 	buf = req->req.buf + req->req.actual;
1073 	req_len = req->req.length - req->req.actual;
1074 	if (rcv_len < bufsize)
1075 		size = min(rcv_len, req_len);
1076 	else
1077 		size = min(bufsize, req_len);
1078 
1079 	/* update parameters */
1080 	req->req.actual += size;
1081 
1082 	/* check transfer finish */
1083 	if ((!req->req.zero && (req->req.actual == req->req.length))
1084 			|| (size % ep->ep.maxpacket)
1085 			|| (size == 0)) {
1086 		pipe_stop(r8a66597, pipenum);
1087 		pipe_irq_disable(r8a66597, pipenum);
1088 		finish = 1;
1089 	}
1090 
1091 	/* read fifo */
1092 	if (req->req.buf) {
1093 		if (size == 0)
1094 			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
1095 		else
1096 			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
1097 
1098 	}
1099 
1100 	if ((ep->pipenum != 0) && finish)
1101 		transfer_complete(ep, req, 0);
1102 }
1103 
1104 static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
1105 {
1106 	u16 check;
1107 	u16 pipenum;
1108 	struct r8a66597_ep *ep;
1109 	struct r8a66597_request *req;
1110 
1111 	if ((status & BRDY0) && (enb & BRDY0)) {
1112 		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
1113 		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);
1114 
1115 		ep = &r8a66597->ep[0];
1116 		req = get_request_from_ep(ep);
1117 		irq_packet_read(ep, req);
1118 	} else {
1119 		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
1120 			check = 1 << pipenum;
1121 			if ((status & check) && (enb & check)) {
1122 				r8a66597_write(r8a66597, ~check, BRDYSTS);
1123 				ep = r8a66597->pipenum2ep[pipenum];
1124 				req = get_request_from_ep(ep);
1125 				if (ep->desc->bEndpointAddress & USB_DIR_IN)
1126 					irq_packet_write(ep, req);
1127 				else
1128 					irq_packet_read(ep, req);
1129 			}
1130 		}
1131 	}
1132 }
1133 
1134 static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
1135 {
1136 	u16 tmp;
1137 	u16 check;
1138 	u16 pipenum;
1139 	struct r8a66597_ep *ep;
1140 	struct r8a66597_request *req;
1141 
1142 	if ((status & BEMP0) && (enb & BEMP0)) {
1143 		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
1144 
1145 		ep = &r8a66597->ep[0];
1146 		req = get_request_from_ep(ep);
1147 		irq_ep0_write(ep, req);
1148 	} else {
1149 		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
1150 			check = 1 << pipenum;
1151 			if ((status & check) && (enb & check)) {
1152 				r8a66597_write(r8a66597, ~check, BEMPSTS);
1153 				tmp = control_reg_get(r8a66597, pipenum);
1154 				if ((tmp & INBUFM) == 0) {
1155 					disable_irq_empty(r8a66597, pipenum);
1156 					pipe_irq_disable(r8a66597, pipenum);
1157 					pipe_stop(r8a66597, pipenum);
1158 					ep = r8a66597->pipenum2ep[pipenum];
1159 					req = get_request_from_ep(ep);
1160 					if (!list_empty(&ep->queue))
1161 						transfer_complete(ep, req, 0);
1162 				}
1163 			}
1164 		}
1165 	}
1166 }
1167 
1168 static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1169 __releases(r8a66597->lock)
1170 __acquires(r8a66597->lock)
1171 {
1172 	struct r8a66597_ep *ep;
1173 	u16 pid;
1174 	u16 status = 0;
1175 	u16 w_index = le16_to_cpu(ctrl->wIndex);
1176 
1177 	switch (ctrl->bRequestType & USB_RECIP_MASK) {
1178 	case USB_RECIP_DEVICE:
1179 		status = 1 << USB_DEVICE_SELF_POWERED;
1180 		break;
1181 	case USB_RECIP_INTERFACE:
1182 		status = 0;
1183 		break;
1184 	case USB_RECIP_ENDPOINT:
1185 		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1186 		pid = control_reg_get_pid(r8a66597, ep->pipenum);
1187 		if (pid == PID_STALL)
1188 			status = 1 << USB_ENDPOINT_HALT;
1189 		else
1190 			status = 0;
1191 		break;
1192 	default:
1193 		pipe_stall(r8a66597, 0);
1194 		return;		/* exit */
1195 	}
1196 
1197 	r8a66597->ep0_data = cpu_to_le16(status);
1198 	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
1199 	r8a66597->ep0_req->length = 2;
1200 	/* AV: what happens if we get called again before that gets through? */
1201 	spin_unlock(&r8a66597->lock);
1202 	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
1203 	spin_lock(&r8a66597->lock);
1204 }
1205 
1206 static void clear_feature(struct r8a66597 *r8a66597,
1207 				struct usb_ctrlrequest *ctrl)
1208 {
1209 	switch (ctrl->bRequestType & USB_RECIP_MASK) {
1210 	case USB_RECIP_DEVICE:
1211 		control_end(r8a66597, 1);
1212 		break;
1213 	case USB_RECIP_INTERFACE:
1214 		control_end(r8a66597, 1);
1215 		break;
1216 	case USB_RECIP_ENDPOINT: {
1217 		struct r8a66597_ep *ep;
1218 		struct r8a66597_request *req;
1219 		u16 w_index = le16_to_cpu(ctrl->wIndex);
1220 
1221 		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1222 		if (!ep->wedge) {
1223 			pipe_stop(r8a66597, ep->pipenum);
1224 			control_reg_sqclr(r8a66597, ep->pipenum);
1225 			spin_unlock(&r8a66597->lock);
1226 			usb_ep_clear_halt(&ep->ep);
1227 			spin_lock(&r8a66597->lock);
1228 		}
1229 
1230 		control_end(r8a66597, 1);
1231 
1232 		req = get_request_from_ep(ep);
1233 		if (ep->busy) {
1234 			ep->busy = 0;
1235 			if (list_empty(&ep->queue))
1236 				break;
1237 			start_packet(ep, req);
1238 		} else if (!list_empty(&ep->queue))
1239 			pipe_start(r8a66597, ep->pipenum);
1240 		}
1241 		break;
1242 	default:
1243 		pipe_stall(r8a66597, 0);
1244 		break;
1245 	}
1246 }
1247 
1248 static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1249 {
1250 	u16 tmp;
1251 	int timeout = 3000;
1252 
1253 	switch (ctrl->bRequestType & USB_RECIP_MASK) {
1254 	case USB_RECIP_DEVICE:
1255 		switch (le16_to_cpu(ctrl->wValue)) {
1256 		case USB_DEVICE_TEST_MODE:
1257 			control_end(r8a66597, 1);
1258 			/* Wait for the completion of status stage */
1259 			do {
1260 				tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
1261 				udelay(1);
1262 			} while (tmp != CS_IDST && timeout-- > 0);
1263 
1264 			if (tmp == CS_IDST)
1265 				r8a66597_bset(r8a66597,
1266 					      le16_to_cpu(ctrl->wIndex) >> 8,
1267 					      TESTMODE);
1268 			break;
1269 		default:
1270 			pipe_stall(r8a66597, 0);
1271 			break;
1272 		}
1273 		break;
1274 	case USB_RECIP_INTERFACE:
1275 		control_end(r8a66597, 1);
1276 		break;
1277 	case USB_RECIP_ENDPOINT: {
1278 		struct r8a66597_ep *ep;
1279 		u16 w_index = le16_to_cpu(ctrl->wIndex);
1280 
1281 		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
1282 		pipe_stall(r8a66597, ep->pipenum);
1283 
1284 		control_end(r8a66597, 1);
1285 		}
1286 		break;
1287 	default:
1288 		pipe_stall(r8a66597, 0);
1289 		break;
1290 	}
1291 }
1292 
1293 /* if the return value is true, call the class driver's setup() */
1294 static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1295 {
1296 	u16 *p = (u16 *)ctrl;
1297 	unsigned long offset = USBREQ;
1298 	int i, ret = 0;
1299 
1300 	/* read fifo */
1301 	r8a66597_write(r8a66597, ~VALID, INTSTS0);
1302 
1303 	for (i = 0; i < 4; i++)
1304 		p[i] = r8a66597_read(r8a66597, offset + i*2);
1305 
1306 	/* check request */
1307 	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1308 		switch (ctrl->bRequest) {
1309 		case USB_REQ_GET_STATUS:
1310 			get_status(r8a66597, ctrl);
1311 			break;
1312 		case USB_REQ_CLEAR_FEATURE:
1313 			clear_feature(r8a66597, ctrl);
1314 			break;
1315 		case USB_REQ_SET_FEATURE:
1316 			set_feature(r8a66597, ctrl);
1317 			break;
1318 		default:
1319 			ret = 1;
1320 			break;
1321 		}
1322 	} else
1323 		ret = 1;
1324 	return ret;
1325 }
1326 
1327 static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
1328 {
1329 	u16 speed = get_usb_speed(r8a66597);
1330 
1331 	switch (speed) {
1332 	case HSMODE:
1333 		r8a66597->gadget.speed = USB_SPEED_HIGH;
1334 		break;
1335 	case FSMODE:
1336 		r8a66597->gadget.speed = USB_SPEED_FULL;
1337 		break;
1338 	default:
1339 		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
1340 		dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
1341 	}
1342 }
1343 
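/*
 * Track DVSQ device-state changes: on bus reset the gadget driver's
 * disconnect() is called, and the connection speed is re-read as needed.
 */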
1344 static void irq_device_state(struct r8a66597 *r8a66597)
1345 {
1346 	u16 dvsq;
1347 
1348 	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
1349 	r8a66597_write(r8a66597, ~DVST, INTSTS0);
1350 
1351 	if (dvsq == DS_DFLT) {
1352 		/* bus reset */
1353 		spin_unlock(&r8a66597->lock);
1354 		r8a66597->driver->disconnect(&r8a66597->gadget);
1355 		spin_lock(&r8a66597->lock);
1356 		r8a66597_update_usb_speed(r8a66597);
1357 	}
1358 	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
1359 		r8a66597_update_usb_speed(r8a66597);
1360 	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
1361 			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
1362 		r8a66597_update_usb_speed(r8a66597);
1363 
1364 	r8a66597->old_dvsq = dvsq;
1365 }
1366 
1367 static void irq_control_stage(struct r8a66597 *r8a66597)
1368 __releases(r8a66597->lock)
1369 __acquires(r8a66597->lock)
1370 {
1371 	struct usb_ctrlrequest ctrl;
1372 	u16 ctsq;
1373 
1374 	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
1375 	r8a66597_write(r8a66597, ~CTRT, INTSTS0);
1376 
1377 	switch (ctsq) {
1378 	case CS_IDST: {
1379 		struct r8a66597_ep *ep;
1380 		struct r8a66597_request *req;
1381 		ep = &r8a66597->ep[0];
1382 		req = get_request_from_ep(ep);
1383 		transfer_complete(ep, req, 0);
1384 		}
1385 		break;
1386 
1387 	case CS_RDDS:
1388 	case CS_WRDS:
1389 	case CS_WRND:
1390 		if (setup_packet(r8a66597, &ctrl)) {
1391 			spin_unlock(&r8a66597->lock);
1392 			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
1393 				< 0)
1394 				pipe_stall(r8a66597, 0);
1395 			spin_lock(&r8a66597->lock);
1396 		}
1397 		break;
1398 	case CS_RDSS:
1399 	case CS_WRSS:
1400 		control_end(r8a66597, 0);
1401 		break;
1402 	default:
1403 		dev_err(r8a66597_to_dev(r8a66597),
1404 			"ctrl_stage: unexpected ctsq(%x)\n", ctsq);
1405 		break;
1406 	}
1407 }
1408 
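/*
 * SUDMAC transfer end: wait for FRDY, add the transferred byte count to the
 * request, then either complete it (OUT) or arm the buffer-empty interrupt so
 * the final IN data drains from the FIFO before completion.
 */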
1409 static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
1410 {
1411 	u16 pipenum;
1412 	struct r8a66597_request *req;
1413 	u32 len;
1414 	int i = 0;
1415 
1416 	pipenum = ep->pipenum;
1417 	pipe_change(r8a66597, pipenum);
1418 
1419 	while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
1420 		udelay(1);
1421 		if (unlikely(i++ >= 10000)) {	/* timeout = 10 msec */
1422 			dev_err(r8a66597_to_dev(r8a66597),
1423 				"%s: FRDY was not set (%d)\n",
1424 				__func__, pipenum);
1425 			return;
1426 		}
1427 	}
1428 
1429 	r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
1430 	req = get_request_from_ep(ep);
1431 
1432 	/* prepare parameters */
1433 	len = r8a66597_sudmac_read(r8a66597, CH0CBC);
1434 	req->req.actual += len;
1435 
1436 	/* clear */
1437 	r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);
1438 
1439 	/* check transfer finish */
1440 	if ((!req->req.zero && (req->req.actual == req->req.length))
1441 			|| (len % ep->ep.maxpacket)) {
1442 		if (ep->dma->dir) {
1443 			disable_irq_ready(r8a66597, pipenum);
1444 			enable_irq_empty(r8a66597, pipenum);
1445 		} else {
1446 			/* Clear the interrupt flag for next transfer */
1447 			r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
1448 			transfer_complete(ep, req, 0);
1449 		}
1450 	}
1451 }
1452 
1453 static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
1454 {
1455 	u32 irqsts;
1456 	struct r8a66597_ep *ep;
1457 	u16 pipenum;
1458 
1459 	irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
1460 	if (irqsts & CH0ENDS) {
1461 		r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
1462 		pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
1463 		ep = r8a66597->pipenum2ep[pipenum];
1464 		sudmac_finish(r8a66597, ep);
1465 	}
1466 }
1467 
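/*
 * Main interrupt handler: dispatches SUDMAC completion, VBUS sampling start,
 * device-state changes, FIFO ready/empty events and control transfer stages,
 * restoring CFIFOSEL before returning.
 */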
1468 static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
1469 {
1470 	struct r8a66597 *r8a66597 = _r8a66597;
1471 	u16 intsts0;
1472 	u16 intenb0;
1473 	u16 brdysts, nrdysts, bempsts;
1474 	u16 brdyenb, nrdyenb, bempenb;
1475 	u16 savepipe;
1476 	u16 mask0;
1477 
1478 	if (r8a66597_is_sudmac(r8a66597))
1479 		r8a66597_sudmac_irq(r8a66597);
1480 
1481 	spin_lock(&r8a66597->lock);
1482 
1483 	intsts0 = r8a66597_read(r8a66597, INTSTS0);
1484 	intenb0 = r8a66597_read(r8a66597, INTENB0);
1485 
1486 	savepipe = r8a66597_read(r8a66597, CFIFOSEL);
1487 
1488 	mask0 = intsts0 & intenb0;
1489 	if (mask0) {
1490 		brdysts = r8a66597_read(r8a66597, BRDYSTS);
1491 		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
1492 		bempsts = r8a66597_read(r8a66597, BEMPSTS);
1493 		brdyenb = r8a66597_read(r8a66597, BRDYENB);
1494 		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
1495 		bempenb = r8a66597_read(r8a66597, BEMPENB);
1496 
1497 		if (mask0 & VBINT) {
1498 			r8a66597_write(r8a66597,  0xffff & ~VBINT,
1499 					INTSTS0);
1500 			r8a66597_start_xclock(r8a66597);
1501 
1502 			/* start vbus sampling */
1503 			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
1504 					& VBSTS;
1505 			r8a66597->scount = R8A66597_MAX_SAMPLING;
1506 
1507 			mod_timer(&r8a66597->timer,
1508 					jiffies + msecs_to_jiffies(50));
1509 		}
1510 		if (intsts0 & DVSQ)
1511 			irq_device_state(r8a66597);
1512 
1513 		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
1514 				&& (brdysts & brdyenb))
1515 			irq_pipe_ready(r8a66597, brdysts, brdyenb);
1516 		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
1517 				&& (bempsts & bempenb))
1518 			irq_pipe_empty(r8a66597, bempsts, bempenb);
1519 
1520 		if (intsts0 & CTRT)
1521 			irq_control_stage(r8a66597);
1522 	}
1523 
1524 	r8a66597_write(r8a66597, savepipe, CFIFOSEL);
1525 
1526 	spin_unlock(&r8a66597->lock);
1527 	return IRQ_HANDLED;
1528 }
1529 
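/*
 * VBUS sampling timer: the VBUS level must stay stable for
 * R8A66597_MAX_SAMPLING consecutive 50 ms samples before a connect or
 * disconnect is reported.
 */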
1530 static void r8a66597_timer(unsigned long _r8a66597)
1531 {
1532 	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
1533 	unsigned long flags;
1534 	u16 tmp;
1535 
1536 	spin_lock_irqsave(&r8a66597->lock, flags);
1537 	tmp = r8a66597_read(r8a66597, SYSCFG0);
1538 	if (r8a66597->scount > 0) {
1539 		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
1540 		if (tmp == r8a66597->old_vbus) {
1541 			r8a66597->scount--;
1542 			if (r8a66597->scount == 0) {
1543 				if (tmp == VBSTS)
1544 					r8a66597_usb_connect(r8a66597);
1545 				else
1546 					r8a66597_usb_disconnect(r8a66597);
1547 			} else {
1548 				mod_timer(&r8a66597->timer,
1549 					jiffies + msecs_to_jiffies(50));
1550 			}
1551 		} else {
1552 			r8a66597->scount = R8A66597_MAX_SAMPLING;
1553 			r8a66597->old_vbus = tmp;
1554 			mod_timer(&r8a66597->timer,
1555 					jiffies + msecs_to_jiffies(50));
1556 		}
1557 	}
1558 	spin_unlock_irqrestore(&r8a66597->lock, flags);
1559 }
1560 
1561 /*-------------------------------------------------------------------------*/
1562 static int r8a66597_enable(struct usb_ep *_ep,
1563 			 const struct usb_endpoint_descriptor *desc)
1564 {
1565 	struct r8a66597_ep *ep;
1566 
1567 	ep = container_of(_ep, struct r8a66597_ep, ep);
1568 	return alloc_pipe_config(ep, desc);
1569 }
1570 
1571 static int r8a66597_disable(struct usb_ep *_ep)
1572 {
1573 	struct r8a66597_ep *ep;
1574 	struct r8a66597_request *req;
1575 	unsigned long flags;
1576 
1577 	ep = container_of(_ep, struct r8a66597_ep, ep);
1578 	BUG_ON(!ep);
1579 
1580 	while (!list_empty(&ep->queue)) {
1581 		req = get_request_from_ep(ep);
1582 		spin_lock_irqsave(&ep->r8a66597->lock, flags);
1583 		transfer_complete(ep, req, -ECONNRESET);
1584 		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1585 	}
1586 
1587 	pipe_irq_disable(ep->r8a66597, ep->pipenum);
1588 	return free_pipe_config(ep);
1589 }
1590 
1591 static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
1592 						gfp_t gfp_flags)
1593 {
1594 	struct r8a66597_request *req;
1595 
1596 	req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
1597 	if (!req)
1598 		return NULL;
1599 
1600 	INIT_LIST_HEAD(&req->queue);
1601 
1602 	return &req->req;
1603 }
1604 
1605 static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
1606 {
1607 	struct r8a66597_request *req;
1608 
1609 	req = container_of(_req, struct r8a66597_request, req);
1610 	kfree(req);
1611 }
1612 
1613 static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
1614 			gfp_t gfp_flags)
1615 {
1616 	struct r8a66597_ep *ep;
1617 	struct r8a66597_request *req;
1618 	unsigned long flags;
1619 	int request = 0;
1620 
1621 	ep = container_of(_ep, struct r8a66597_ep, ep);
1622 	req = container_of(_req, struct r8a66597_request, req);
1623 
1624 	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
1625 		return -ESHUTDOWN;
1626 
1627 	spin_lock_irqsave(&ep->r8a66597->lock, flags);
1628 
1629 	if (list_empty(&ep->queue))
1630 		request = 1;
1631 
1632 	list_add_tail(&req->queue, &ep->queue);
1633 	req->req.actual = 0;
1634 	req->req.status = -EINPROGRESS;
1635 
1636 	if (ep->desc == NULL)	/* control */
1637 		start_ep0(ep, req);
1638 	else {
1639 		if (request && !ep->busy)
1640 			start_packet(ep, req);
1641 	}
1642 
1643 	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1644 
1645 	return 0;
1646 }
1647 
1648 static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1649 {
1650 	struct r8a66597_ep *ep;
1651 	struct r8a66597_request *req;
1652 	unsigned long flags;
1653 
1654 	ep = container_of(_ep, struct r8a66597_ep, ep);
1655 	req = container_of(_req, struct r8a66597_request, req);
1656 
1657 	spin_lock_irqsave(&ep->r8a66597->lock, flags);
1658 	if (!list_empty(&ep->queue))
1659 		transfer_complete(ep, req, -ECONNRESET);
1660 	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1661 
1662 	return 0;
1663 }
1664 
1665 static int r8a66597_set_halt(struct usb_ep *_ep, int value)
1666 {
1667 	struct r8a66597_ep *ep;
1668 	struct r8a66597_request *req;
1669 	unsigned long flags;
1670 	int ret = 0;
1671 
1672 	ep = container_of(_ep, struct r8a66597_ep, ep);
1673 	req = get_request_from_ep(ep);
1674 
1675 	spin_lock_irqsave(&ep->r8a66597->lock, flags);
1676 	if (!list_empty(&ep->queue)) {
1677 		ret = -EAGAIN;
1678 		goto out;
1679 	}
1680 	if (value) {
1681 		ep->busy = 1;
1682 		pipe_stall(ep->r8a66597, ep->pipenum);
1683 	} else {
1684 		ep->busy = 0;
1685 		ep->wedge = 0;
1686 		pipe_stop(ep->r8a66597, ep->pipenum);
1687 	}
1688 
1689 out:
1690 	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1691 	return ret;
1692 }
1693 
1694 static int r8a66597_set_wedge(struct usb_ep *_ep)
1695 {
1696 	struct r8a66597_ep *ep;
1697 	unsigned long flags;
1698 
1699 	ep = container_of(_ep, struct r8a66597_ep, ep);
1700 
1701 	if (!ep || !ep->desc)
1702 		return -EINVAL;
1703 
1704 	spin_lock_irqsave(&ep->r8a66597->lock, flags);
1705 	ep->wedge = 1;
1706 	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1707 
1708 	return usb_ep_set_halt(_ep);
1709 }
1710 
1711 static void r8a66597_fifo_flush(struct usb_ep *_ep)
1712 {
1713 	struct r8a66597_ep *ep;
1714 	unsigned long flags;
1715 
1716 	ep = container_of(_ep, struct r8a66597_ep, ep);
1717 	spin_lock_irqsave(&ep->r8a66597->lock, flags);
1718 	if (list_empty(&ep->queue) && !ep->busy) {
1719 		pipe_stop(ep->r8a66597, ep->pipenum);
1720 		r8a66597_bset(ep->r8a66597, BCLR, ep->fifoctr);
1721 		r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
1722 		r8a66597_write(ep->r8a66597, 0, ep->pipectr);
1723 	}
1724 	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
1725 }
1726 
1727 static struct usb_ep_ops r8a66597_ep_ops = {
1728 	.enable		= r8a66597_enable,
1729 	.disable	= r8a66597_disable,
1730 
1731 	.alloc_request	= r8a66597_alloc_request,
1732 	.free_request	= r8a66597_free_request,
1733 
1734 	.queue		= r8a66597_queue,
1735 	.dequeue	= r8a66597_dequeue,
1736 
1737 	.set_halt	= r8a66597_set_halt,
1738 	.set_wedge	= r8a66597_set_wedge,
1739 	.fifo_flush	= r8a66597_fifo_flush,
1740 };
1741 
1742 /*-------------------------------------------------------------------------*/
1743 static int r8a66597_start(struct usb_gadget *gadget,
1744 		struct usb_gadget_driver *driver)
1745 {
1746 	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1747 
1748 	if (!driver
1749 			|| driver->max_speed < USB_SPEED_HIGH
1750 			|| !driver->setup)
1751 		return -EINVAL;
1752 	if (!r8a66597)
1753 		return -ENODEV;
1754 
1755 	/* hook up the driver */
1756 	r8a66597->driver = driver;
1757 
1758 	init_controller(r8a66597);
1759 	r8a66597_bset(r8a66597, VBSE, INTENB0);
1760 	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
1761 		r8a66597_start_xclock(r8a66597);
1762 		/* start vbus sampling */
1763 		r8a66597->old_vbus = r8a66597_read(r8a66597,
1764 					 INTSTS0) & VBSTS;
1765 		r8a66597->scount = R8A66597_MAX_SAMPLING;
1766 		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
1767 	}
1768 
1769 	return 0;
1770 }
1771 
1772 static int r8a66597_stop(struct usb_gadget *gadget,
1773 		struct usb_gadget_driver *driver)
1774 {
1775 	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
1776 	unsigned long flags;
1777 
1778 	spin_lock_irqsave(&r8a66597->lock, flags);
1779 	r8a66597_bclr(r8a66597, VBSE, INTENB0);
1780 	disable_controller(r8a66597);
1781 	spin_unlock_irqrestore(&r8a66597->lock, flags);
1782 
1783 	r8a66597->driver = NULL;
1784 	return 0;
1785 }
1786 
1787 /*-------------------------------------------------------------------------*/
1788 static int r8a66597_get_frame(struct usb_gadget *_gadget)
1789 {
1790 	struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
1791 	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
1792 }
1793 
static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned long flags;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (is_on)
		r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
	else
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	return 0;
}

static struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame		= r8a66597_get_frame,
	.udc_start		= r8a66597_start,
	.udc_stop		= r8a66597_stop,
	.pullup			= r8a66597_pullup,
};

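/*
 * Undo probe(): unregister the gadget, stop the timer, unmap the
 * registers, release the IRQ and free the driver state.
 */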
static int __exit r8a66597_remove(struct platform_device *pdev)
{
	struct r8a66597		*r8a66597 = dev_get_drvdata(&pdev->dev);

	usb_del_gadget_udc(&r8a66597->gadget);
	del_timer_sync(&r8a66597->timer);
	iounmap(r8a66597->reg);
	if (r8a66597->pdata->sudmac)
		iounmap(r8a66597->sudmac_reg);
	free_irq(platform_get_irq(pdev, 0), r8a66597);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
	device_unregister(&r8a66597->gadget.dev);
	kfree(r8a66597);
	return 0;
}

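/* Completion handler for the driver's internal ep0 request; nothing to do. */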
static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}

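/* Map the SUDMAC register window described by the "sudmac" memory resource. */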
static int __init r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
					  struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error(sudmac).\n");
		return -ENODEV;
	}

	r8a66597->sudmac_reg = ioremap(res->start, resource_size(res));
	if (r8a66597->sudmac_reg == NULL) {
		dev_err(&pdev->dev, "ioremap error(sudmac).\n");
		return -ENOMEM;
	}

	return 0;
}

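/*
 * Probe: map the controller registers, set up the gadget and endpoint
 * data, request the IRQ and register the UDC with the gadget framework.
 */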
static int __init r8a66597_probe(struct platform_device *pdev)
{
#ifdef CONFIG_HAVE_CLK
	char clk_name[8];
#endif
	struct resource *res, *ires;
	int irq;
	void __iomem *reg = NULL;
	struct r8a66597 *r8a66597 = NULL;
	int ret = 0;
	int i;
	unsigned long irq_trigger;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		goto clean_up;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!ires) {
		/* bail out before dereferencing a missing IRQ resource */
		ret = -ENODEV;
		dev_err(&pdev->dev, "platform_get_resource error(irq).\n");
		goto clean_up;
	}
	irq = ires->start;
	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;

	if (irq < 0) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "platform_get_irq error.\n");
		goto clean_up;
	}

	reg = ioremap(res->start, resource_size(res));
	if (reg == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "ioremap error.\n");
		goto clean_up;
	}

	/* initialize udc */
	r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
	if (r8a66597 == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "kzalloc error\n");
		goto clean_up;
	}

	spin_lock_init(&r8a66597->lock);
	dev_set_drvdata(&pdev->dev, r8a66597);
	r8a66597->pdata = pdev->dev.platform_data;
	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;

	r8a66597->gadget.ops = &r8a66597_gadget_ops;
	dev_set_name(&r8a66597->gadget.dev, "gadget");
	r8a66597->gadget.max_speed = USB_SPEED_HIGH;
	r8a66597->gadget.dev.parent = &pdev->dev;
	r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
	r8a66597->gadget.dev.release = pdev->dev.release;
	r8a66597->gadget.name = udc_name;
	ret = device_register(&r8a66597->gadget.dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "device_register failed\n");
		goto clean_up;
	}

	init_timer(&r8a66597->timer);
	r8a66597->timer.function = r8a66597_timer;
	r8a66597->timer.data = (unsigned long)r8a66597;
	r8a66597->reg = reg;

#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
		r8a66597->clk = clk_get(&pdev->dev, clk_name);
		if (IS_ERR(r8a66597->clk)) {
			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
				clk_name);
			ret = PTR_ERR(r8a66597->clk);
			goto clean_up_dev;
		}
		clk_enable(r8a66597->clk);
	}
#endif
	if (r8a66597->pdata->sudmac) {
		ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
		if (ret < 0)
			goto clean_up2;
	}

	disable_controller(r8a66597); /* make sure controller is disabled */

	ret = request_irq(irq, r8a66597_irq, IRQF_SHARED,
			udc_name, r8a66597);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error (%d)\n", ret);
		goto clean_up2;
	}

	INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
	r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
	INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
		struct r8a66597_ep *ep = &r8a66597->ep[i];

		if (i != 0) {
			INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
			list_add_tail(&r8a66597->ep[i].ep.ep_list,
					&r8a66597->gadget.ep_list);
		}
		ep->r8a66597 = r8a66597;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = r8a66597_ep_name[i];
		ep->ep.ops = &r8a66597_ep_ops;
		ep->ep.maxpacket = 512;
	}
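	/* endpoint 0 is the control pipe and always uses the CFIFO port */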
	r8a66597->ep[0].ep.maxpacket = 64;
	r8a66597->ep[0].pipenum = 0;
	r8a66597->ep[0].fifoaddr = CFIFO;
	r8a66597->ep[0].fifosel = CFIFOSEL;
	r8a66597->ep[0].fifoctr = CFIFOCTR;
	r8a66597->ep[0].pipectr = get_pipectr_addr(0);
	r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
	r8a66597->epaddr2ep[0] = &r8a66597->ep[0];

	r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
							GFP_KERNEL);
	if (r8a66597->ep0_req == NULL) {
		/* report the failure instead of returning the stale 0 in ret */
		ret = -ENOMEM;
		goto clean_up3;
	}
	r8a66597->ep0_req->complete = nop_completion;

	ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
	if (ret)
		goto err_add_udc;

	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
	return 0;

err_add_udc:
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
	r8a66597->ep0_req = NULL;	/* avoid a second free in clean_up */
clean_up3:
	free_irq(irq, r8a66597);
clean_up2:
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
clean_up_dev:
#endif
	device_unregister(&r8a66597->gadget.dev);
clean_up:
	if (r8a66597) {
		if (r8a66597->sudmac_reg)
			iounmap(r8a66597->sudmac_reg);
		if (r8a66597->ep0_req)
			r8a66597_free_request(&r8a66597->ep[0].ep,
						r8a66597->ep0_req);
		kfree(r8a66597);
	}
	if (reg)
		iounmap(reg);

	return ret;
}

/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
	.remove =	__exit_p(r8a66597_remove),
	.driver		= {
		.name =	(char *) udc_name,
	},
};
MODULE_ALIAS("platform:r8a66597_udc");

static int __init r8a66597_udc_init(void)
{
	return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
}
module_init(r8a66597_udc_init);

static void __exit r8a66597_udc_cleanup(void)
{
	platform_driver_unregister(&r8a66597_driver);
}
module_exit(r8a66597_udc_cleanup);

MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");