1 /*
2  * Handles the Intel PXA27x USB Device Controller (UDC)
3  *
4  * Inspired by original driver by Frank Becker, David Brownell, and others.
5  * Copyright (C) 2008 Robert Jarzmik
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/types.h>
15 #include <linux/errno.h>
16 #include <linux/platform_device.h>
17 #include <linux/delay.h>
18 #include <linux/list.h>
19 #include <linux/interrupt.h>
20 #include <linux/proc_fs.h>
21 #include <linux/clk.h>
22 #include <linux/irq.h>
23 #include <linux/gpio.h>
24 #include <linux/slab.h>
25 #include <linux/prefetch.h>
26 
27 #include <asm/byteorder.h>
28 #include <mach/hardware.h>
29 
30 #include <linux/usb.h>
31 #include <linux/usb/ch9.h>
32 #include <linux/usb/gadget.h>
33 #include <mach/udc.h>
34 
35 #include "pxa27x_udc.h"
36 
37 /*
38  * This driver handles the USB Device Controller (UDC) in Intel's PXA 27x
39  * series processors.
40  *
41  * Such controller drivers work with a gadget driver.  The gadget driver
42  * returns descriptors, implements configuration and data protocols used
43  * by the host to interact with this device, and allocates endpoints to
44  * the different protocol interfaces.  The controller driver virtualizes
45  * usb hardware so that the gadget drivers will be more portable.
46  *
47  * This UDC hardware wants to implement a bit too much USB protocol. The
48  * biggest issues are: that the endpoints have to be set up before the
49  * controller can be enabled (minor, and not uncommon); and each endpoint
50  * can only have one configuration, interface and alternative interface
51  * number (major, and very unusual). Once set up, these cannot be changed
52  * without a controller reset.
53  *
54  * The workaround is to set up all the combinations needed by the gadgets
55  * which will work with this driver. This is done statically, in the pxa_udc
56  * structure. See pxa_udc, udc_usb_ep versus pxa_ep, and the matching function
57  * find_pxa_ep. (You could modify this if needed.  Some drivers have a
58  * "fifo_mode" module parameter to facilitate such changes.)
59  *
60  * The combinations have been tested with these gadgets:
61  *  - zero gadget
62  *  - file storage gadget
63  *  - ether gadget
64  *
65  * The driver doesn't use DMA, only IO access and IRQ callbacks. No use is
66  * made of the UDC's double buffering either. USB "On-The-Go" is not implemented.
67  *
68  * All requests are handled the same way:
69  *  - the driver tries to push the request directly into the endpoint IO fifo
70  *  - if the IO fifo is not big enough, the remainder is sent/received from
71  *    the interrupt handler.
72  */
73 
74 #define	DRIVER_VERSION	"2008-04-18"
75 #define	DRIVER_DESC	"PXA 27x USB Device Controller driver"
76 
77 static const char driver_name[] = "pxa27x_udc";
78 static struct pxa_udc *the_controller;
79 
80 static void handle_ep(struct pxa_ep *ep);
81 
82 /*
83  * Debug filesystem
84  */
85 #ifdef CONFIG_USB_GADGET_DEBUG_FS
86 
87 #include <linux/debugfs.h>
88 #include <linux/uaccess.h>
89 #include <linux/seq_file.h>
90 
91 static int state_dbg_show(struct seq_file *s, void *p)
92 {
93 	struct pxa_udc *udc = s->private;
94 	int pos = 0, ret;
95 	u32 tmp;
96 
97 	ret = -ENODEV;
98 	if (!udc->driver)
99 		goto out;
100 
101 	/* basic device status */
102 	pos += seq_printf(s, DRIVER_DESC "\n"
103 			 "%s version: %s\nGadget driver: %s\n",
104 			 driver_name, DRIVER_VERSION,
105 			 udc->driver ? udc->driver->driver.name : "(none)");
106 
107 	tmp = udc_readl(udc, UDCCR);
108 	pos += seq_printf(s,
109 			 "udccr=0x%0x(%s%s%s%s%s%s%s%s%s%s), "
110 			 "con=%d,inter=%d,altinter=%d\n", tmp,
111 			 (tmp & UDCCR_OEN) ? " oen":"",
112 			 (tmp & UDCCR_AALTHNP) ? " aalthnp":"",
113 			 (tmp & UDCCR_AHNP) ? " rem" : "",
114 			 (tmp & UDCCR_BHNP) ? " rstir" : "",
115 			 (tmp & UDCCR_DWRE) ? " dwre" : "",
116 			 (tmp & UDCCR_SMAC) ? " smac" : "",
117 			 (tmp & UDCCR_EMCE) ? " emce" : "",
118 			 (tmp & UDCCR_UDR) ? " udr" : "",
119 			 (tmp & UDCCR_UDA) ? " uda" : "",
120 			 (tmp & UDCCR_UDE) ? " ude" : "",
121 			 (tmp & UDCCR_ACN) >> UDCCR_ACN_S,
122 			 (tmp & UDCCR_AIN) >> UDCCR_AIN_S,
123 			 (tmp & UDCCR_AAISN) >> UDCCR_AAISN_S);
124 	/* registers for device and ep0 */
125 	pos += seq_printf(s, "udcicr0=0x%08x udcicr1=0x%08x\n",
126 			udc_readl(udc, UDCICR0), udc_readl(udc, UDCICR1));
127 	pos += seq_printf(s, "udcisr0=0x%08x udcisr1=0x%08x\n",
128 			udc_readl(udc, UDCISR0), udc_readl(udc, UDCISR1));
129 	pos += seq_printf(s, "udcfnr=%d\n", udc_readl(udc, UDCFNR));
130 	pos += seq_printf(s, "irqs: reset=%lu, suspend=%lu, resume=%lu, "
131 			"reconfig=%lu\n",
132 			udc->stats.irqs_reset, udc->stats.irqs_suspend,
133 			udc->stats.irqs_resume, udc->stats.irqs_reconfig);
134 
135 	ret = 0;
136 out:
137 	return ret;
138 }
139 
140 static int queues_dbg_show(struct seq_file *s, void *p)
141 {
142 	struct pxa_udc *udc = s->private;
143 	struct pxa_ep *ep;
144 	struct pxa27x_request *req;
145 	int pos = 0, i, maxpkt, ret;
146 
147 	ret = -ENODEV;
148 	if (!udc->driver)
149 		goto out;
150 
151 	/* dump endpoint queues */
152 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
153 		ep = &udc->pxa_ep[i];
154 		maxpkt = ep->fifo_size;
155 		pos += seq_printf(s,  "%-12s max_pkt=%d %s\n",
156 				EPNAME(ep), maxpkt, "pio");
157 
158 		if (list_empty(&ep->queue)) {
159 			pos += seq_printf(s, "\t(nothing queued)\n");
160 			continue;
161 		}
162 
163 		list_for_each_entry(req, &ep->queue, queue) {
164 			pos += seq_printf(s,  "\treq %p len %d/%d buf %p\n",
165 					&req->req, req->req.actual,
166 					req->req.length, req->req.buf);
167 		}
168 	}
169 
170 	ret = 0;
171 out:
172 	return ret;
173 }
174 
175 static int eps_dbg_show(struct seq_file *s, void *p)
176 {
177 	struct pxa_udc *udc = s->private;
178 	struct pxa_ep *ep;
179 	int pos = 0, i, ret;
180 	u32 tmp;
181 
182 	ret = -ENODEV;
183 	if (!udc->driver)
184 		goto out;
185 
186 	ep = &udc->pxa_ep[0];
187 	tmp = udc_ep_readl(ep, UDCCSR);
188 	pos += seq_printf(s, "udccsr0=0x%03x(%s%s%s%s%s%s%s)\n", tmp,
189 			 (tmp & UDCCSR0_SA) ? " sa" : "",
190 			 (tmp & UDCCSR0_RNE) ? " rne" : "",
191 			 (tmp & UDCCSR0_FST) ? " fst" : "",
192 			 (tmp & UDCCSR0_SST) ? " sst" : "",
193 			 (tmp & UDCCSR0_DME) ? " dme" : "",
194 			 (tmp & UDCCSR0_IPR) ? " ipr" : "",
195 			 (tmp & UDCCSR0_OPC) ? " opc" : "");
196 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
197 		ep = &udc->pxa_ep[i];
198 		tmp = i? udc_ep_readl(ep, UDCCR) : udc_readl(udc, UDCCR);
199 		pos += seq_printf(s, "%-12s: "
200 				"IN %lu(%lu reqs), OUT %lu(%lu reqs), "
201 				"irqs=%lu, udccr=0x%08x, udccsr=0x%03x, "
202 				"udcbcr=%d\n",
203 				EPNAME(ep),
204 				ep->stats.in_bytes, ep->stats.in_ops,
205 				ep->stats.out_bytes, ep->stats.out_ops,
206 				ep->stats.irqs,
207 				tmp, udc_ep_readl(ep, UDCCSR),
208 				udc_ep_readl(ep, UDCBCR));
209 	}
210 
211 	ret = 0;
212 out:
213 	return ret;
214 }
215 
216 static int eps_dbg_open(struct inode *inode, struct file *file)
217 {
218 	return single_open(file, eps_dbg_show, inode->i_private);
219 }
220 
221 static int queues_dbg_open(struct inode *inode, struct file *file)
222 {
223 	return single_open(file, queues_dbg_show, inode->i_private);
224 }
225 
226 static int state_dbg_open(struct inode *inode, struct file *file)
227 {
228 	return single_open(file, state_dbg_show, inode->i_private);
229 }
230 
231 static const struct file_operations state_dbg_fops = {
232 	.owner		= THIS_MODULE,
233 	.open		= state_dbg_open,
234 	.llseek		= seq_lseek,
235 	.read		= seq_read,
236 	.release	= single_release,
237 };
238 
239 static const struct file_operations queues_dbg_fops = {
240 	.owner		= THIS_MODULE,
241 	.open		= queues_dbg_open,
242 	.llseek		= seq_lseek,
243 	.read		= seq_read,
244 	.release	= single_release,
245 };
246 
247 static const struct file_operations eps_dbg_fops = {
248 	.owner		= THIS_MODULE,
249 	.open		= eps_dbg_open,
250 	.llseek		= seq_lseek,
251 	.read		= seq_read,
252 	.release	= single_release,
253 };
254 
255 static void pxa_init_debugfs(struct pxa_udc *udc)
256 {
257 	struct dentry *root, *state, *queues, *eps;
258 
259 	root = debugfs_create_dir(udc->gadget.name, NULL);
260 	if (IS_ERR(root) || !root)
261 		goto err_root;
262 
263 	state = debugfs_create_file("udcstate", 0400, root, udc,
264 			&state_dbg_fops);
265 	if (!state)
266 		goto err_state;
267 	queues = debugfs_create_file("queues", 0400, root, udc,
268 			&queues_dbg_fops);
269 	if (!queues)
270 		goto err_queues;
271 	eps = debugfs_create_file("epstate", 0400, root, udc,
272 			&eps_dbg_fops);
273 	if (!eps)
274 		goto err_eps;
275 
276 	udc->debugfs_root = root;
277 	udc->debugfs_state = state;
278 	udc->debugfs_queues = queues;
279 	udc->debugfs_eps = eps;
280 	return;
281 err_eps:
282 	debugfs_remove(eps);
283 err_queues:
284 	debugfs_remove(queues);
285 err_state:
286 	debugfs_remove(root);
287 err_root:
288 	dev_err(udc->dev, "debugfs is not available\n");
289 }
290 
291 static void pxa_cleanup_debugfs(struct pxa_udc *udc)
292 {
293 	debugfs_remove(udc->debugfs_eps);
294 	debugfs_remove(udc->debugfs_queues);
295 	debugfs_remove(udc->debugfs_state);
296 	debugfs_remove(udc->debugfs_root);
297 	udc->debugfs_eps = NULL;
298 	udc->debugfs_queues = NULL;
299 	udc->debugfs_state = NULL;
300 	udc->debugfs_root = NULL;
301 }
302 
303 #else
304 static inline void pxa_init_debugfs(struct pxa_udc *udc)
305 {
306 }
307 
308 static inline void pxa_cleanup_debugfs(struct pxa_udc *udc)
309 {
310 }
311 #endif
312 
313 /**
314  * is_match_usb_pxa - check if usb_ep and pxa_ep match
315  * @udc_usb_ep: usb endpoint
316  * @ep: pxa endpoint
317  * @config: configuration required in pxa_ep
318  * @interface: interface required in pxa_ep
319  * @altsetting: altsetting required in pxa_ep
320  *
321  * Returns 1 if all criteria match between pxa and usb endpoint, 0 otherwise
322  */
323 static int is_match_usb_pxa(struct udc_usb_ep *udc_usb_ep, struct pxa_ep *ep,
324 		int config, int interface, int altsetting)
325 {
326 	if (usb_endpoint_num(&udc_usb_ep->desc) != ep->addr)
327 		return 0;
328 	if (usb_endpoint_dir_in(&udc_usb_ep->desc) != ep->dir_in)
329 		return 0;
330 	if (usb_endpoint_type(&udc_usb_ep->desc) != ep->type)
331 		return 0;
332 	if ((ep->config != config) || (ep->interface != interface)
333 			|| (ep->alternate != altsetting))
334 		return 0;
335 	return 1;
336 }
337 
338 /**
339  * find_pxa_ep - find pxa_ep structure matching udc_usb_ep
340  * @udc: pxa udc
341  * @udc_usb_ep: udc_usb_ep structure
342  *
343  * Match udc_usb_ep against all available pxa_ep, to see if one matches.
344  * This is necessary because of the strong pxa hardware restriction requiring
345  * that once pxa endpoints are initialized, their configuration is frozen, and
346  * no change can be made to their address, direction, or to the configuration,
347  * interface or altsetting in which they are active ... which differs from more
348  * usual models where endpoints are roughly just addressable fifos, and where
349  * configuration events are left up to gadget drivers (like all control messages).
350  *
351  * Note that there is still a blurred point here:
352  *   - we rely on the UDCCR register's "active interface" and "active altsetting".
353  *     This makes no sense with regard to the USB spec, where multiple interfaces
354  *     are active at the same time.
355  *   - if we knew for sure that the pxa can handle multiple interfaces at the
356  *     same time, assuming Intel's Developer Guide is wrong, this function
357  *     should be reviewed, and a cache of (interface, altsetting) pairs should
358  *     be kept in the pxa_udc structure. In this case this function would match
359  *     against that cache instead of the "last altsetting" set up.
360  *
361  * Returns the matched pxa_ep structure or NULL if none found
362  */
363 static struct pxa_ep *find_pxa_ep(struct pxa_udc *udc,
364 		struct udc_usb_ep *udc_usb_ep)
365 {
366 	int i;
367 	struct pxa_ep *ep;
368 	int cfg = udc->config;
369 	int iface = udc->last_interface;
370 	int alt = udc->last_alternate;
371 
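	/* ep0 always maps onto pxa_ep[0]: it is the only endpoint shared by
	 * every configuration, so it never goes through the matching loop. */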
372 	if (udc_usb_ep == &udc->udc_usb_ep[0])
373 		return &udc->pxa_ep[0];
374 
375 	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
376 		ep = &udc->pxa_ep[i];
377 		if (is_match_usb_pxa(udc_usb_ep, ep, cfg, iface, alt))
378 			return ep;
379 	}
380 	return NULL;
381 }
382 
383 /**
384  * update_pxa_ep_matches - update pxa_ep cached values in all udc_usb_ep
385  * @udc: pxa udc
386  *
387  * Context: in_interrupt()
388  *
389  * Updates all pxa_ep fields in udc_usb_ep structures, if this field was
390  * previously set up (and is not NULL). The update is necessary if a
391  * configuration change or altsetting change was issued by the USB host.
392  */
393 static void update_pxa_ep_matches(struct pxa_udc *udc)
394 {
395 	int i;
396 	struct udc_usb_ep *udc_usb_ep;
397 
398 	for (i = 1; i < NR_USB_ENDPOINTS; i++) {
399 		udc_usb_ep = &udc->udc_usb_ep[i];
400 		if (udc_usb_ep->pxa_ep)
401 			udc_usb_ep->pxa_ep = find_pxa_ep(udc, udc_usb_ep);
402 	}
403 }
404 
405 /**
406  * pio_irq_enable - Enables irq generation for one endpoint
407  * @ep: udc endpoint
408  */
409 static void pio_irq_enable(struct pxa_ep *ep)
410 {
411 	struct pxa_udc *udc = ep->dev;
412 	int index = EPIDX(ep);
413 	u32 udcicr0 = udc_readl(udc, UDCICR0);
414 	u32 udcicr1 = udc_readl(udc, UDCICR1);
415 
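	/* Each endpoint owns two consecutive bits in UDCICR0/1 (packet
	 * complete and fifo error), hence the "3 << (index * 2)" mask. */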
416 	if (index < 16)
417 		udc_writel(udc, UDCICR0, udcicr0 | (3 << (index * 2)));
418 	else
419 		udc_writel(udc, UDCICR1, udcicr1 | (3 << ((index - 16) * 2)));
420 }
421 
422 /**
423  * pio_irq_disable - Disables irq generation for one endpoint
424  * @ep: udc endpoint
425  */
426 static void pio_irq_disable(struct pxa_ep *ep)
427 {
428 	struct pxa_udc *udc = ep->dev;
429 	int index = EPIDX(ep);
430 	u32 udcicr0 = udc_readl(udc, UDCICR0);
431 	u32 udcicr1 = udc_readl(udc, UDCICR1);
432 
433 	if (index < 16)
434 		udc_writel(udc, UDCICR0, udcicr0 & ~(3 << (index * 2)));
435 	else
436 		udc_writel(udc, UDCICR1, udcicr1 & ~(3 << ((index - 16) * 2)));
437 }
438 
439 /**
440  * udc_set_mask_UDCCR - set bits in UDCCR
441  * @udc: udc device
442  * @mask: bits to set in UDCCR
443  *
444  * Sets bits in UDCCR, leaving DME and FST bits as they were.
445  */
446 static inline void udc_set_mask_UDCCR(struct pxa_udc *udc, int mask)
447 {
448 	u32 udccr = udc_readl(udc, UDCCR);
449 	udc_writel(udc, UDCCR,
450 			(udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
451 }
452 
453 /**
454  * udc_clear_mask_UDCCR - clears bits in UDCCR
455  * @udc: udc device
456  * @mask: bit to clear in UDCCR
457  *
458  * Clears bits in UDCCR, leaving DME and FST bits as they were.
459  */
460 static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
461 {
462 	u32 udccr = udc_readl(udc, UDCCR);
463 	udc_writel(udc, UDCCR,
464 			(udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
465 }
466 
467 /**
468  * ep_write_UDCCSR - set bits in UDCCSR
469  * @ep: udc endpoint
470  * @mask: bits to set in UDCCSR
471  *
472  * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
473  *
474  * A specific case applies to ep0: the ACM bit is always set to 1, for
475  * SET_INTERFACE and SET_CONFIGURATION.
476  */
477 static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
478 {
479 	if (is_ep0(ep))
480 		mask |= UDCCSR0_ACM;
481 	udc_ep_writel(ep, UDCCSR, mask);
482 }
483 
484 /**
485  * ep_count_bytes_remain - get how many bytes in udc endpoint
486  * @ep: udc endpoint
487  *
488  * Returns number of bytes in OUT fifos. Broken for IN fifos (-EOPNOTSUPP)
489  */
490 static int ep_count_bytes_remain(struct pxa_ep *ep)
491 {
492 	if (ep->dir_in)
493 		return -EOPNOTSUPP;
494 	return udc_ep_readl(ep, UDCBCR) & 0x3ff;
495 }
496 
497 /**
498  * ep_is_empty - checks if ep has byte ready for reading
499  * @ep: udc endpoint
500  *
501  * If endpoint is the control endpoint, checks if there are bytes in the
502  * control endpoint fifo. If endpoint is a data endpoint, checks if bytes
503  * are ready for reading on OUT endpoint.
504  *
505  * Returns 0 if ep not empty, 1 if ep empty, -EOPNOTSUPP if IN endpoint
506  */
507 static int ep_is_empty(struct pxa_ep *ep)
508 {
509 	int ret;
510 
511 	if (!is_ep0(ep) && ep->dir_in)
512 		return -EOPNOTSUPP;
513 	if (is_ep0(ep))
514 		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR0_RNE);
515 	else
516 		ret = !(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNE);
517 	return ret;
518 }
519 
520 /**
521  * ep_is_full - checks if ep has place to write bytes
522  * @ep: udc endpoint
523  *
524  * If endpoint is not the control endpoint and is an IN endpoint, checks if
525  * there is place to write bytes into the endpoint.
526  *
527  * Returns 0 if ep not full, 1 if ep full, -EOPNOTSUPP if OUT endpoint
528  */
529 static int ep_is_full(struct pxa_ep *ep)
530 {
531 	if (is_ep0(ep))
532 		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_IPR);
533 	if (!ep->dir_in)
534 		return -EOPNOTSUPP;
535 	return (!(udc_ep_readl(ep, UDCCSR) & UDCCSR_BNF));
536 }
537 
538 /**
539  * epout_has_pkt - checks if OUT endpoint fifo has a packet available
540  * @ep: pxa endpoint
541  *
542  * Returns 1 if a complete packet is available, 0 if not, -EOPNOTSUPP for IN ep.
543  */
544 static int epout_has_pkt(struct pxa_ep *ep)
545 {
546 	if (!is_ep0(ep) && ep->dir_in)
547 		return -EOPNOTSUPP;
548 	if (is_ep0(ep))
549 		return (udc_ep_readl(ep, UDCCSR) & UDCCSR0_OPC);
550 	return (udc_ep_readl(ep, UDCCSR) & UDCCSR_PC);
551 }
552 
553 /**
554  * set_ep0state - Set ep0 automata state
555  * @udc: udc device
556  * @state: state
557  */
558 static void set_ep0state(struct pxa_udc *udc, int state)
559 {
560 	struct pxa_ep *ep = &udc->pxa_ep[0];
561 	char *old_stname = EP0_STNAME(udc);
562 
563 	udc->ep0state = state;
564 	ep_dbg(ep, "state=%s->%s, udccsr0=0x%03x, udcbcr=%d\n", old_stname,
565 		EP0_STNAME(udc), udc_ep_readl(ep, UDCCSR),
566 		udc_ep_readl(ep, UDCBCR));
567 }
568 
569 /**
570  * ep0_idle - Put control endpoint into idle state
571  * @dev: udc device
572  */
573 static void ep0_idle(struct pxa_udc *dev)
574 {
575 	set_ep0state(dev, WAIT_FOR_SETUP);
576 }
577 
578 /**
579  * inc_ep_stats_reqs - Update ep stats counts
580  * @ep: physical endpoint
581  * @is_in: ep direction (USB_DIR_IN or 0)
582  *
583  * Bumps the per-direction request counter of the endpoint.
584  */
585 static void inc_ep_stats_reqs(struct pxa_ep *ep, int is_in)
586 {
587 	if (is_in)
588 		ep->stats.in_ops++;
589 	else
590 		ep->stats.out_ops++;
591 }
592 
593 /**
594  * inc_ep_stats_bytes - Update ep stats counts
595  * @ep: physical endpoint
596  * @count: bytes transferred on endpoint
597  * @is_in: ep direction (USB_DIR_IN or 0)
598  */
599 static void inc_ep_stats_bytes(struct pxa_ep *ep, int count, int is_in)
600 {
601 	if (is_in)
602 		ep->stats.in_bytes += count;
603 	else
604 		ep->stats.out_bytes += count;
605 }
606 
607 /**
608  * pxa_ep_setup - Sets up a usb physical endpoint
609  * @ep: pxa27x physical endpoint
610  *
611  * Sets up the physical pxa27x ep's UDCCR configuration register
612  */
613 static __init void pxa_ep_setup(struct pxa_ep *ep)
614 {
615 	u32 new_udccr;
616 
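	/* Pack configuration, interface, altsetting, endpoint address, transfer
	 * type, direction and maxpacket size into the per-endpoint UDCCR, and
	 * enable the endpoint (UDCCONR_EE). */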
617 	new_udccr = ((ep->config << UDCCONR_CN_S) & UDCCONR_CN)
618 		| ((ep->interface << UDCCONR_IN_S) & UDCCONR_IN)
619 		| ((ep->alternate << UDCCONR_AISN_S) & UDCCONR_AISN)
620 		| ((EPADDR(ep) << UDCCONR_EN_S) & UDCCONR_EN)
621 		| ((EPXFERTYPE(ep) << UDCCONR_ET_S) & UDCCONR_ET)
622 		| ((ep->dir_in) ? UDCCONR_ED : 0)
623 		| ((ep->fifo_size << UDCCONR_MPS_S) & UDCCONR_MPS)
624 		| UDCCONR_EE;
625 
626 	udc_ep_writel(ep, UDCCR, new_udccr);
627 }
628 
629 /**
630  * pxa_eps_setup - Sets up all usb physical endpoints
631  * @dev: udc device
632  *
633  * Sets up all pxa physical endpoints, except ep0
634  */
635 static __init void pxa_eps_setup(struct pxa_udc *dev)
636 {
637 	unsigned int i;
638 
639 	dev_dbg(dev->dev, "%s: dev=%p\n", __func__, dev);
640 
641 	for (i = 1; i < NR_PXA_ENDPOINTS; i++)
642 		pxa_ep_setup(&dev->pxa_ep[i]);
643 }
644 
645 /**
646  * pxa_ep_alloc_request - Allocate usb request
647  * @_ep: usb endpoint
648  * @gfp_flags: memory allocation flags
649  *
650  * For the pxa27x, these can just wrap kmalloc/kfree.  gadget drivers
651  * must still pass correctly initialized endpoints, since other controller
652  * drivers may care about how it's currently set up (dma issues etc).
653  */
654 static struct usb_request *
655 pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
656 {
657 	struct pxa27x_request *req;
658 
659 	req = kzalloc(sizeof *req, gfp_flags);
660 	if (!req)
661 		return NULL;
662 
663 	INIT_LIST_HEAD(&req->queue);
664 	req->in_use = 0;
665 	req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
666 
667 	return &req->req;
668 }
669 
670 /**
671  * pxa_ep_free_request - Free usb request
672  * @_ep: usb endpoint
673  * @_req: usb request
674  *
675  * Wrapper around kfree to free _req
676  */
677 static void pxa_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
678 {
679 	struct pxa27x_request *req;
680 
681 	req = container_of(_req, struct pxa27x_request, req);
682 	WARN_ON(!list_empty(&req->queue));
683 	kfree(req);
684 }
685 
686 /**
687  * ep_add_request - add a request to the endpoint's queue
688  * @ep: usb endpoint
689  * @req: usb request
690  *
691  * Context: ep->lock held
692  *
693  * Queues the request in the endpoint's queue, and enables the interrupts
694  * on the endpoint.
695  */
696 static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req)
697 {
698 	if (unlikely(!req))
699 		return;
700 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
701 		req->req.length, udc_ep_readl(ep, UDCCSR));
702 
703 	req->in_use = 1;
704 	list_add_tail(&req->queue, &ep->queue);
705 	pio_irq_enable(ep);
706 }
707 
708 /**
709  * ep_del_request - removes a request from the endpoint's queue
710  * @ep: usb endpoint
711  * @req: usb request
712  *
713  * Context: ep->lock held
714  *
715  * Unqueue the request from the endpoint's queue. If there are no more requests
716  * on the endpoint, and if it's not the control endpoint, interrupts are
717  * disabled on the endpoint.
718  */
719 static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
720 {
721 	if (unlikely(!req))
722 		return;
723 	ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req,
724 		req->req.length, udc_ep_readl(ep, UDCCSR));
725 
726 	list_del_init(&req->queue);
727 	req->in_use = 0;
728 	if (!is_ep0(ep) && list_empty(&ep->queue))
729 		pio_irq_disable(ep);
730 }
731 
732 /**
733  * req_done - Complete a usb request
734  * @ep: pxa physical endpoint
735  * @req: pxa request
736  * @status: usb request status sent to gadget API
737  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
738  *
739  * Context: ep->lock held if flags not NULL, else ep->lock released
740  *
741  * Retire a pxa27x usb request. Endpoint must be locked.
742  */
743 static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
744 	unsigned long *pflags)
745 {
746 	unsigned long	flags;
747 
748 	ep_del_request(ep, req);
749 	if (likely(req->req.status == -EINPROGRESS))
750 		req->req.status = status;
751 	else
752 		status = req->req.status;
753 
754 	if (status && status != -ESHUTDOWN)
755 		ep_dbg(ep, "complete req %p stat %d len %u/%u\n",
756 			&req->req, status,
757 			req->req.actual, req->req.length);
758 
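	/* Drop ep->lock (if held) around the completion callback: the gadget
	 * driver may re-queue a request from within its complete() handler. */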
759 	if (pflags)
760 		spin_unlock_irqrestore(&ep->lock, *pflags);
761 	local_irq_save(flags);
762 	req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
763 	local_irq_restore(flags);
764 	if (pflags)
765 		spin_lock_irqsave(&ep->lock, *pflags);
766 }
767 
768 /**
769  * ep_end_out_req - Ends endpoint OUT request
770  * @ep: physical endpoint
771  * @req: pxa request
772  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
773  *
774  * Context: ep->lock held or released (see req_done())
775  *
776  * Ends endpoint OUT request (completes usb request).
777  */
778 static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
779 	unsigned long *pflags)
780 {
781 	inc_ep_stats_reqs(ep, !USB_DIR_IN);
782 	req_done(ep, req, 0, pflags);
783 }
784 
785 /**
786  * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
787  * @ep: physical endpoint
788  * @req: pxa request
789  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
790  *
791  * Context: ep->lock held or released (see req_done())
792  *
793  * Ends control endpoint OUT request (completes usb request), and puts
794  * control endpoint into idle state
795  */
796 static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
797 	unsigned long *pflags)
798 {
799 	set_ep0state(ep->dev, OUT_STATUS_STAGE);
800 	ep_end_out_req(ep, req, pflags);
801 	ep0_idle(ep->dev);
802 }
803 
804 /**
805  * ep_end_in_req - Ends endpoint IN request
806  * @ep: physical endpoint
807  * @req: pxa request
808  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
809  *
810  * Context: ep->lock held or released (see req_done())
811  *
812  * Ends endpoint IN request (completes usb request).
813  */
814 static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
815 	unsigned long *pflags)
816 {
817 	inc_ep_stats_reqs(ep, USB_DIR_IN);
818 	req_done(ep, req, 0, pflags);
819 }
820 
821 /**
822  * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
823  * @ep: physical endpoint
824  * @req: pxa request
825  * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
826  *
827  * Context: ep->lock held or released (see req_done())
828  *
829  * Ends control endpoint IN request (completes usb request), and puts
830  * control endpoint into status state
831  */
832 static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
833 	unsigned long *pflags)
834 {
835 	set_ep0state(ep->dev, IN_STATUS_STAGE);
836 	ep_end_in_req(ep, req, pflags);
837 }
838 
839 /**
840  * nuke - Dequeue all requests
841  * @ep: pxa endpoint
842  * @status: usb request status
843  *
844  * Context: ep->lock released
845  *
846  * Dequeues all requests on an endpoint. As a side effect, interrupts will be
847  * disabled on that endpoint (because no more requests).
848  */
849 static void nuke(struct pxa_ep *ep, int status)
850 {
851 	struct pxa27x_request	*req;
852 	unsigned long		flags;
853 
854 	spin_lock_irqsave(&ep->lock, flags);
855 	while (!list_empty(&ep->queue)) {
856 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
857 		req_done(ep, req, status, &flags);
858 	}
859 	spin_unlock_irqrestore(&ep->lock, flags);
860 }
861 
862 /**
863  * read_packet - transfer 1 packet from an OUT endpoint into request
864  * @ep: pxa physical endpoint
865  * @req: usb request
866  *
867  * Takes bytes from the OUT endpoint and transfers them into the usb request.
868  * If there is less space in the request than bytes received in the OUT
869  * endpoint, the excess bytes are left in the OUT endpoint fifo.
870  *
871  * Returns how many bytes were actually transferred
872  */
873 static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
874 {
875 	u32 *buf;
876 	int bytes_ep, bufferspace, count, i;
877 
878 	bytes_ep = ep_count_bytes_remain(ep);
879 	bufferspace = req->req.length - req->req.actual;
880 
881 	buf = (u32 *)(req->req.buf + req->req.actual);
882 	prefetchw(buf);
883 
884 	if (likely(!ep_is_empty(ep)))
885 		count = min(bytes_ep, bufferspace);
886 	else /* zlp */
887 		count = 0;
888 
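	/* Pop the fifo 32 bits at a time; a trailing partial word still
	 * requires one full read of UDCDR. */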
889 	for (i = count; i > 0; i -= 4)
890 		*buf++ = udc_ep_readl(ep, UDCDR);
891 	req->req.actual += count;
892 
893 	ep_write_UDCCSR(ep, UDCCSR_PC);
894 
895 	return count;
896 }
897 
898 /**
899  * write_packet - transfer 1 packet from request into an IN endpoint
900  * @ep: pxa physical endpoint
901  * @req: usb request
902  * @max: max bytes that fit into endpoint
903  *
904  * Takes bytes from usb request, and transfers them into the physical
905  * endpoint. If there are no bytes to transfer, doesn't write anything
906  * to physical endpoint.
907  *
908  * Returns how many bytes were actually transferred.
909  */
910 static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req,
911 			unsigned int max)
912 {
913 	int length, count, remain, i;
914 	u32 *buf;
915 	u8 *buf_8;
916 
917 	buf = (u32 *)(req->req.buf + req->req.actual);
918 	prefetch(buf);
919 
920 	length = min(req->req.length - req->req.actual, max);
921 	req->req.actual += length;
922 
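	/* Write the word-aligned part of the payload as 32-bit words, then push
	 * the trailing 1-3 bytes one at a time through the byte-wide access. */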
923 	remain = length & 0x3;
924 	count = length & ~(0x3);
925 	for (i = count; i > 0 ; i -= 4)
926 		udc_ep_writel(ep, UDCDR, *buf++);
927 
928 	buf_8 = (u8 *)buf;
929 	for (i = remain; i > 0; i--)
930 		udc_ep_writeb(ep, UDCDR, *buf_8++);
931 
932 	ep_vdbg(ep, "length=%d+%d, udccsr=0x%03x\n", count, remain,
933 		udc_ep_readl(ep, UDCCSR));
934 
935 	return length;
936 }
937 
938 /**
939  * read_fifo - Transfer packets from OUT endpoint into usb request
940  * @ep: pxa physical endpoint
941  * @req: usb request
942  *
943  * Context: callable when in_interrupt()
944  *
945  * Unload as many packets as possible from the fifo we use for usb OUT
946  * transfers and put them into the request. Caller should have made sure
947  * there's at least one packet ready.
948  * Doesn't complete the request, that's the caller's job
949  *
950  * Returns 1 if the request completed, 0 otherwise
951  */
952 static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
953 {
954 	int count, is_short, completed = 0;
955 
956 	while (epout_has_pkt(ep)) {
957 		count = read_packet(ep, req);
958 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
959 
960 		is_short = (count < ep->fifo_size);
961 		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
962 			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
963 			&req->req, req->req.actual, req->req.length);
964 
965 		/* completion */
966 		if (is_short || req->req.actual == req->req.length) {
967 			completed = 1;
968 			break;
969 		}
970 		/* finished that packet.  the next one may be waiting... */
971 	}
972 	return completed;
973 }
974 
975 /**
976  * write_fifo - transfer packets from usb request into an IN endpoint
977  * @ep: pxa physical endpoint
978  * @req: pxa usb request
979  *
980  * Write to an IN endpoint fifo, as many packets as possible.
981  * irqs will use this to write the rest later.
982  * caller guarantees at least one packet buffer is ready (or a zlp).
983  * Doesn't complete the request, that's the caller's job
984  *
985  * Returns 1 if request fully transferred, 0 if partial transfer
986  */
987 static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
988 {
989 	unsigned max;
990 	int count, is_short, is_last = 0, completed = 0, totcount = 0;
991 	u32 udccsr;
992 
993 	max = ep->fifo_size;
994 	do {
995 		is_short = 0;
996 
997 		udccsr = udc_ep_readl(ep, UDCCSR);
998 		if (udccsr & UDCCSR_PC) {
999 			ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
1000 				udccsr);
1001 			ep_write_UDCCSR(ep, UDCCSR_PC);
1002 		}
1003 		if (udccsr & UDCCSR_TRN) {
1004 			ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
1005 				udccsr);
1006 			ep_write_UDCCSR(ep, UDCCSR_TRN);
1007 		}
1008 
1009 		count = write_packet(ep, req, max);
1010 		inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1011 		totcount += count;
1012 
1013 		/* last packet is usually short (or a zlp) */
1014 		if (unlikely(count < max)) {
1015 			is_last = 1;
1016 			is_short = 1;
1017 		} else {
1018 			if (likely(req->req.length > req->req.actual)
1019 					|| req->req.zero)
1020 				is_last = 0;
1021 			else
1022 				is_last = 1;
1023 			/* interrupt/iso maxpacket may not fill the fifo */
1024 			is_short = unlikely(max < ep->fifo_size);
1025 		}
1026 
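		/* SP marks the packet in the fifo as short, so the UDC sends it
		 * (possibly zero-length) without waiting for a full maxpacket. */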
1027 		if (is_short)
1028 			ep_write_UDCCSR(ep, UDCCSR_SP);
1029 
1030 		/* requests complete when all IN data is in the FIFO */
1031 		if (is_last) {
1032 			completed = 1;
1033 			break;
1034 		}
1035 	} while (!ep_is_full(ep));
1036 
1037 	ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n",
1038 			totcount, is_last ? "/L" : "", is_short ? "/S" : "",
1039 			req->req.length - req->req.actual, &req->req);
1040 
1041 	return completed;
1042 }
1043 
1044 /**
1045  * read_ep0_fifo - Transfer packets from control endpoint into usb request
1046  * @ep: control endpoint
1047  * @req: pxa usb request
1048  *
1049  * Special ep0 version of the above read_fifo. Reads as many bytes from control
1050  * endpoint as can be read, and stores them into usb request (limited by request
1051  * maximum length).
1052  *
1053  * Returns 0 if usb request only partially filled, 1 if fully filled
1054  */
1055 static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1056 {
1057 	int count, is_short, completed = 0;
1058 
1059 	while (epout_has_pkt(ep)) {
1060 		count = read_packet(ep, req);
1061 		ep_write_UDCCSR(ep, UDCCSR0_OPC);
1062 		inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
1063 
1064 		is_short = (count < ep->fifo_size);
1065 		ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n",
1066 			udc_ep_readl(ep, UDCCSR), count, is_short ? "/S" : "",
1067 			&req->req, req->req.actual, req->req.length);
1068 
1069 		if (is_short || req->req.actual >= req->req.length) {
1070 			completed = 1;
1071 			break;
1072 		}
1073 	}
1074 
1075 	return completed;
1076 }
1077 
1078 /**
1079  * write_ep0_fifo - Send a request to control endpoint (ep0 in)
1080  * @ep: control endpoint
1081  * @req: request
1082  *
1083  * Context: callable when in_interrupt()
1084  *
1085  * Sends a request (or a part of the request) to the control endpoint (ep0 in).
1086  * If the request doesn't fit, the remaining part will be sent from irq.
1087  * The request is considered fully written only if either:
1088  *   - the last write transferred all remaining bytes, but the fifo was not fully filled
1089  *   - the last write was a 0 length write
1090  *
1091  * Returns 1 if request fully written, 0 if request only partially sent
1092  */
1093 static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1094 {
1095 	unsigned	count;
1096 	int		is_last, is_short;
1097 
1098 	count = write_packet(ep, req, EP0_FIFO_SIZE);
1099 	inc_ep_stats_bytes(ep, count, USB_DIR_IN);
1100 
1101 	is_short = (count < EP0_FIFO_SIZE);
1102 	is_last = ((count == 0) || (count < EP0_FIFO_SIZE));
1103 
1104 	/* Sends either a short packet or a 0 length packet */
1105 	if (unlikely(is_short))
1106 		ep_write_UDCCSR(ep, UDCCSR0_IPR);
1107 
1108 	ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
1109 		count, is_short ? "/S" : "", is_last ? "/L" : "",
1110 		req->req.length - req->req.actual,
1111 		&req->req, udc_ep_readl(ep, UDCCSR));
1112 
1113 	return is_last;
1114 }
1115 
1116 /**
1117  * pxa_ep_queue - Queue a request on an endpoint
1118  * @_ep: usb endpoint
1119  * @_req: usb request
1120  * @gfp_flags: flags
1121  *
1122  * Context: normally called when not in_interrupt(), but callable when in_interrupt()
1123  * in the special case of ep0 setup:
1124  *   (irq->handle_ep0_ctrl_req->gadget_setup->pxa_ep_queue)
1125  *
1126  * Returns 0 if succeeded, error otherwise
1127  */
1128 static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1129 			gfp_t gfp_flags)
1130 {
1131 	struct udc_usb_ep	*udc_usb_ep;
1132 	struct pxa_ep		*ep;
1133 	struct pxa27x_request	*req;
1134 	struct pxa_udc		*dev;
1135 	unsigned long		flags;
1136 	int			rc = 0;
1137 	int			is_first_req;
1138 	unsigned		length;
1139 	int			recursion_detected;
1140 
1141 	req = container_of(_req, struct pxa27x_request, req);
1142 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1143 
1144 	if (unlikely(!_req || !_req->complete || !_req->buf))
1145 		return -EINVAL;
1146 
1147 	if (unlikely(!_ep))
1148 		return -EINVAL;
1149 
1150 	dev = udc_usb_ep->dev;
1151 	ep = udc_usb_ep->pxa_ep;
1152 	if (unlikely(!ep))
1153 		return -EINVAL;
1154 
1155 	dev = ep->dev;
1156 	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
1157 		ep_dbg(ep, "bogus device state\n");
1158 		return -ESHUTDOWN;
1159 	}
1160 
1161 	/* iso is always one packet per request, that's the only way
1162 	 * we can report per-packet status.  that also helps with dma.
1163 	 */
1164 	if (unlikely(EPXFERTYPE_is_ISO(ep)
1165 			&& req->req.length > ep->fifo_size))
1166 		return -EMSGSIZE;
1167 
1168 	spin_lock_irqsave(&ep->lock, flags);
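	/* If we were called from within handle_ep() itself (a gadget completion
	 * handler re-queuing a request), don't recurse back into handle_ep():
	 * the running handler will pick the new request up. */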
1169 	recursion_detected = ep->in_handle_ep;
1170 
1171 	is_first_req = list_empty(&ep->queue);
1172 	ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
1173 			_req, is_first_req ? "yes" : "no",
1174 			_req->length, _req->buf);
1175 
1176 	if (!ep->enabled) {
1177 		_req->status = -ESHUTDOWN;
1178 		rc = -ESHUTDOWN;
1179 		goto out_locked;
1180 	}
1181 
1182 	if (req->in_use) {
1183 		ep_err(ep, "refusing to queue req %p (already queued)\n", req);
1184 		goto out_locked;
1185 	}
1186 
1187 	length = _req->length;
1188 	_req->status = -EINPROGRESS;
1189 	_req->actual = 0;
1190 
1191 	ep_add_request(ep, req);
1192 	spin_unlock_irqrestore(&ep->lock, flags);
1193 
1194 	if (is_ep0(ep)) {
1195 		switch (dev->ep0state) {
1196 		case WAIT_ACK_SET_CONF_INTERF:
1197 			if (length == 0) {
1198 				ep_end_in_req(ep, req, NULL);
1199 			} else {
1200 				ep_err(ep, "got a request of %d bytes while"
1201 					" in state WAIT_ACK_SET_CONF_INTERF\n",
1202 					length);
1203 				ep_del_request(ep, req);
1204 				rc = -EL2HLT;
1205 			}
1206 			ep0_idle(ep->dev);
1207 			break;
1208 		case IN_DATA_STAGE:
1209 			if (!ep_is_full(ep))
1210 				if (write_ep0_fifo(ep, req))
1211 					ep0_end_in_req(ep, req, NULL);
1212 			break;
1213 		case OUT_DATA_STAGE:
1214 			if ((length == 0) || !epout_has_pkt(ep))
1215 				if (read_ep0_fifo(ep, req))
1216 					ep0_end_out_req(ep, req, NULL);
1217 			break;
1218 		default:
1219 			ep_err(ep, "odd state %s to send me a request\n",
1220 				EP0_STNAME(ep->dev));
1221 			ep_del_request(ep, req);
1222 			rc = -EL2HLT;
1223 			break;
1224 		}
1225 	} else {
1226 		if (!recursion_detected)
1227 			handle_ep(ep);
1228 	}
1229 
1230 out:
1231 	return rc;
1232 out_locked:
1233 	spin_unlock_irqrestore(&ep->lock, flags);
1234 	goto out;
1235 }
1236 
1237 /**
1238  * pxa_ep_dequeue - Dequeue one request
1239  * @_ep: usb endpoint
1240  * @_req: usb request
1241  *
1242  * Return 0 if no error, -EINVAL or -ECONNRESET otherwise
1243  */
1244 static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1245 {
1246 	struct pxa_ep		*ep;
1247 	struct udc_usb_ep	*udc_usb_ep;
1248 	struct pxa27x_request	*req;
1249 	unsigned long		flags;
1250 	int			rc = -EINVAL;
1251 
1252 	if (!_ep)
1253 		return rc;
1254 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1255 	ep = udc_usb_ep->pxa_ep;
1256 	if (!ep || is_ep0(ep))
1257 		return rc;
1258 
1259 	spin_lock_irqsave(&ep->lock, flags);
1260 
1261 	/* make sure it's actually queued on this endpoint */
1262 	list_for_each_entry(req, &ep->queue, queue) {
1263 		if (&req->req == _req) {
1264 			rc = 0;
1265 			break;
1266 		}
1267 	}
1268 
1269 	spin_unlock_irqrestore(&ep->lock, flags);
1270 	if (!rc)
1271 		req_done(ep, req, -ECONNRESET, NULL);
1272 	return rc;
1273 }
1274 
1275 /**
1276  * pxa_ep_set_halt - Halts operations on one endpoint
1277  * @_ep: usb endpoint
1278  * @value: 1 to set halt, 0 to clear it (clearing is unsupported: only the
1279  *         host can clear halt on this hardware)
1280  * Returns 0 if no error, -EINVAL, -EROFS, -EAGAIN otherwise
1281  */
1282 static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
1283 {
1284 	struct pxa_ep		*ep;
1285 	struct udc_usb_ep	*udc_usb_ep;
1286 	unsigned long flags;
1287 	int rc;
1288 
1289 
1290 	if (!_ep)
1291 		return -EINVAL;
1292 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1293 	ep = udc_usb_ep->pxa_ep;
1294 	if (!ep || is_ep0(ep))
1295 		return -EINVAL;
1296 
1297 	if (value == 0) {
1298 		/*
1299 		 * This path (reset toggle+halt) is needed to implement
1300 		 * SET_INTERFACE on normal hardware.  but it can't be
1301 		 * done from software on the PXA UDC, and the hardware
1302 		 * forgets to do it as part of SET_INTERFACE automagic.
1303 		 */
1304 		ep_dbg(ep, "only host can clear halt\n");
1305 		return -EROFS;
1306 	}
1307 
1308 	spin_lock_irqsave(&ep->lock, flags);
1309 
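	/* An IN endpoint cannot be halted while its fifo is full or requests
	 * are still queued; the gadget should retry later. */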
1310 	rc = -EAGAIN;
1311 	if (ep->dir_in	&& (ep_is_full(ep) || !list_empty(&ep->queue)))
1312 		goto out;
1313 
1314 	/* FST, FEF bits are the same for control and non control endpoints */
1315 	rc = 0;
1316 	ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
1317 	if (is_ep0(ep))
1318 		set_ep0state(ep->dev, STALL);
1319 
1320 out:
1321 	spin_unlock_irqrestore(&ep->lock, flags);
1322 	return rc;
1323 }
1324 
1325 /**
1326  * pxa_ep_fifo_status - Get how many bytes in physical endpoint
1327  * @_ep: usb endpoint
1328  *
1329  * Returns number of bytes in OUT fifos. Broken for IN fifos.
1330  */
1331 static int pxa_ep_fifo_status(struct usb_ep *_ep)
1332 {
1333 	struct pxa_ep		*ep;
1334 	struct udc_usb_ep	*udc_usb_ep;
1335 
1336 	if (!_ep)
1337 		return -ENODEV;
1338 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1339 	ep = udc_usb_ep->pxa_ep;
1340 	if (!ep || is_ep0(ep))
1341 		return -ENODEV;
1342 
1343 	if (ep->dir_in)
1344 		return -EOPNOTSUPP;
1345 	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN || ep_is_empty(ep))
1346 		return 0;
1347 	else
1348 		return ep_count_bytes_remain(ep) + 1;
1349 }
1350 
1351 /**
1352  * pxa_ep_fifo_flush - Flushes one endpoint
1353  * @_ep: usb endpoint
1354  *
1355  * Discards all data in one endpoint (IN or OUT), except the control endpoint.
1356  */
1357 static void pxa_ep_fifo_flush(struct usb_ep *_ep)
1358 {
1359 	struct pxa_ep		*ep;
1360 	struct udc_usb_ep	*udc_usb_ep;
1361 	unsigned long		flags;
1362 
1363 	if (!_ep)
1364 		return;
1365 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1366 	ep = udc_usb_ep->pxa_ep;
1367 	if (!ep || is_ep0(ep))
1368 		return;
1369 
1370 	spin_lock_irqsave(&ep->lock, flags);
1371 
1372 	if (unlikely(!list_empty(&ep->queue)))
1373 		ep_dbg(ep, "called while queue list not empty\n");
1374 	ep_dbg(ep, "called\n");
1375 
1376 	/* for OUT, just read and discard the FIFO contents. */
1377 	if (!ep->dir_in) {
1378 		while (!ep_is_empty(ep))
1379 			udc_ep_readl(ep, UDCDR);
1380 	} else {
1381 		/* most IN status is the same, but ISO can't stall */
1382 		ep_write_UDCCSR(ep,
1383 				UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
1384 				| (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
1385 	}
1386 
1387 	spin_unlock_irqrestore(&ep->lock, flags);
1388 }
1389 
1390 /**
1391  * pxa_ep_enable - Enables usb endpoint
1392  * @_ep: usb endpoint
1393  * @desc: usb endpoint descriptor
1394  *
1395  * Nothing much to do here, as ep configuration is done once and for all
1396  * before udc is enabled. After udc enable, no physical endpoint configuration
1397  * can be changed.
1398  * Function makes sanity checks and flushes the endpoint.
1399  */
1400 static int pxa_ep_enable(struct usb_ep *_ep,
1401 	const struct usb_endpoint_descriptor *desc)
1402 {
1403 	struct pxa_ep		*ep;
1404 	struct udc_usb_ep	*udc_usb_ep;
1405 	struct pxa_udc		*udc;
1406 
1407 	if (!_ep || !desc)
1408 		return -EINVAL;
1409 
1410 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1411 	if (udc_usb_ep->pxa_ep) {
1412 		ep = udc_usb_ep->pxa_ep;
1413 		ep_warn(ep, "usb_ep %s already enabled, doing nothing\n",
1414 			_ep->name);
1415 	} else {
1416 		ep = find_pxa_ep(udc_usb_ep->dev, udc_usb_ep);
1417 	}
1418 
1419 	if (!ep || is_ep0(ep)) {
1420 		dev_err(udc_usb_ep->dev->dev,
1421 			"unable to match pxa_ep for ep %s\n",
1422 			_ep->name);
1423 		return -EINVAL;
1424 	}
1425 
1426 	if ((desc->bDescriptorType != USB_DT_ENDPOINT)
1427 			|| (ep->type != usb_endpoint_type(desc))) {
1428 		ep_err(ep, "type mismatch\n");
1429 		return -EINVAL;
1430 	}
1431 
1432 	if (ep->fifo_size < usb_endpoint_maxp(desc)) {
1433 		ep_err(ep, "bad maxpacket\n");
1434 		return -ERANGE;
1435 	}
1436 
1437 	udc_usb_ep->pxa_ep = ep;
1438 	udc = ep->dev;
1439 
1440 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
1441 		ep_err(ep, "bogus device state\n");
1442 		return -ESHUTDOWN;
1443 	}
1444 
1445 	ep->enabled = 1;
1446 
1447 	/* flush fifo (mostly for OUT buffers) */
1448 	pxa_ep_fifo_flush(_ep);
1449 
1450 	ep_dbg(ep, "enabled\n");
1451 	return 0;
1452 }
1453 
1454 /**
1455  * pxa_ep_disable - Disable usb endpoint
1456  * @_ep: usb endpoint
1457  *
1458  * Same as for pxa_ep_enable, no physical endpoint configuration can be
1459  * changed.
1460  * Function flushes the endpoint and related requests.
1461  */
1462 static int pxa_ep_disable(struct usb_ep *_ep)
1463 {
1464 	struct pxa_ep		*ep;
1465 	struct udc_usb_ep	*udc_usb_ep;
1466 
1467 	if (!_ep)
1468 		return -EINVAL;
1469 
1470 	udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
1471 	ep = udc_usb_ep->pxa_ep;
1472 	if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
1473 		return -EINVAL;
1474 
1475 	ep->enabled = 0;
1476 	nuke(ep, -ESHUTDOWN);
1477 
1478 	pxa_ep_fifo_flush(_ep);
1479 	udc_usb_ep->pxa_ep = NULL;
1480 
1481 	ep_dbg(ep, "disabled\n");
1482 	return 0;
1483 }
1484 
1485 static struct usb_ep_ops pxa_ep_ops = {
1486 	.enable		= pxa_ep_enable,
1487 	.disable	= pxa_ep_disable,
1488 
1489 	.alloc_request	= pxa_ep_alloc_request,
1490 	.free_request	= pxa_ep_free_request,
1491 
1492 	.queue		= pxa_ep_queue,
1493 	.dequeue	= pxa_ep_dequeue,
1494 
1495 	.set_halt	= pxa_ep_set_halt,
1496 	.fifo_status	= pxa_ep_fifo_status,
1497 	.fifo_flush	= pxa_ep_fifo_flush,
1498 };
1499 
1500 /**
1501  * dplus_pullup - Connect or disconnect pullup resistor to D+ pin
1502  * @udc: udc device
1503  * @on: 0 to disconnect the pullup resistor, 1 to connect it
1504  * Context: any
1505  *
1506  * Handle D+ pullup resistor, make the device visible to the usb bus, and
1507  * declare it as a full speed usb device
1508  */
1509 static void dplus_pullup(struct pxa_udc *udc, int on)
1510 {
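	/* Boards provide either a GPIO wired to the pullup or a legacy
	 * udc_command() hook; drive whichever is available. */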
1511 	if (on) {
1512 		if (gpio_is_valid(udc->mach->gpio_pullup))
1513 			gpio_set_value(udc->mach->gpio_pullup,
1514 				       !udc->mach->gpio_pullup_inverted);
1515 		if (udc->mach->udc_command)
1516 			udc->mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
1517 	} else {
1518 		if (gpio_is_valid(udc->mach->gpio_pullup))
1519 			gpio_set_value(udc->mach->gpio_pullup,
1520 				       udc->mach->gpio_pullup_inverted);
1521 		if (udc->mach->udc_command)
1522 			udc->mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
1523 	}
1524 	udc->pullup_on = on;
1525 }
1526 
1527 /**
1528  * pxa_udc_get_frame - Returns usb frame number
1529  * @_gadget: usb gadget
1530  */
1531 static int pxa_udc_get_frame(struct usb_gadget *_gadget)
1532 {
1533 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1534 
1535 	return (udc_readl(udc, UDCFNR) & 0x7ff);
1536 }
1537 
1538 /**
1539  * pxa_udc_wakeup - Force udc device out of suspend
1540  * @_gadget: usb gadget
1541  *
1542  * Returns 0 if successful, error code otherwise
1543  */
1544 static int pxa_udc_wakeup(struct usb_gadget *_gadget)
1545 {
1546 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1547 
1548 	/* host may not have enabled remote wakeup */
1549 	if ((udc_readl(udc, UDCCR) & UDCCR_DWRE) == 0)
1550 		return -EHOSTUNREACH;
1551 	udc_set_mask_UDCCR(udc, UDCCR_UDR);
1552 	return 0;
1553 }
1554 
1555 static void udc_enable(struct pxa_udc *udc);
1556 static void udc_disable(struct pxa_udc *udc);
1557 
1558 /**
1559  * should_enable_udc - Tells if UDC should be enabled
1560  * @udc: udc device
1561  * Context: any
1562  *
1563  * The UDC should be enabled if:
1564  *
1565  *  - the pullup resistor is connected
1566  *  - and a gadget driver is bound
1567  *  - and vbus is sensed (or no vbus sense is available)
1568  *
1569  * Returns 1 if UDC should be enabled, 0 otherwise
1570  */
1571 static int should_enable_udc(struct pxa_udc *udc)
1572 {
1573 	int put_on;
1574 
1575 	put_on = ((udc->pullup_on) && (udc->driver));
1576 	put_on &= ((udc->vbus_sensed) || (!udc->transceiver));
1577 	return put_on;
1578 }
1579 
1580 /**
1581  * should_disable_udc - Tells if UDC should be disabled
1582  * @udc: udc device
1583  * Context: any
1584  *
1585  * The UDC should be disabled if:
1586  *  - the pullup resistor is not connected
1587  *  - or no gadget driver is bound
1588  *  - or no vbus is sensed (when vbus sensing is available)
1589  *
1590  * Returns 1 if UDC should be disabled
1591  */
1592 static int should_disable_udc(struct pxa_udc *udc)
1593 {
1594 	int put_off;
1595 
1596 	put_off = ((!udc->pullup_on) || (!udc->driver));
1597 	put_off |= ((!udc->vbus_sensed) && (udc->transceiver));
1598 	return put_off;
1599 }
1600 
1601 /**
1602  * pxa_udc_pullup - Offer manual D+ pullup control
1603  * @_gadget: usb gadget using the control
1604  * @is_active: 0 if disconnect, else connect D+ pullup resistor
1605  * Context: !in_interrupt()
1606  *
1607  * Returns 0 if OK, -EOPNOTSUPP if udc driver doesn't handle D+ pullup
1608  */
1609 static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
1610 {
1611 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1612 
1613 	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
1614 		return -EOPNOTSUPP;
1615 
1616 	dplus_pullup(udc, is_active);
1617 
1618 	if (should_enable_udc(udc))
1619 		udc_enable(udc);
1620 	if (should_disable_udc(udc))
1621 		udc_disable(udc);
1622 	return 0;
1623 }
1624 
1627 
1628 /**
1629  * pxa_udc_vbus_session - Called by external transceiver to enable/disable udc
1630  * @_gadget: usb gadget
1631  * @is_active: 0 if should disable the udc, 1 if should enable
1632  *
1633  * Enables the udc, and optionally activates the D+ pullup resistor. Or disables
1634  * the udc, and deactivates the D+ pullup resistor.
1635  *
1636  * Returns 0
1637  */
1638 static int pxa_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1639 {
1640 	struct pxa_udc *udc = to_gadget_udc(_gadget);
1641 
1642 	udc->vbus_sensed = is_active;
1643 	if (should_enable_udc(udc))
1644 		udc_enable(udc);
1645 	if (should_disable_udc(udc))
1646 		udc_disable(udc);
1647 
1648 	return 0;
1649 }
1650 
1651 /**
1652  * pxa_udc_vbus_draw - Called by gadget driver after SET_CONFIGURATION completed
1653  * @_gadget: usb gadget
1654  * @mA: current drawn
1655  *
1656  * Context: !in_interrupt()
1657  *
1658  * Called after a configuration was chosen by a USB host, to inform how much
1659  * current can be drawn by the device from the VBus line.
1660  *
1661  * Returns 0 or -EOPNOTSUPP if no transceiver is handling the udc
1662  */
1663 static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1664 {
1665 	struct pxa_udc *udc;
1666 
1667 	udc = to_gadget_udc(_gadget);
1668 	if (udc->transceiver)
1669 		return otg_set_power(udc->transceiver, mA);
1670 	return -EOPNOTSUPP;
1671 }
1672 
1673 static int pxa27x_udc_start(struct usb_gadget_driver *driver,
1674 		int (*bind)(struct usb_gadget *));
1675 static int pxa27x_udc_stop(struct usb_gadget_driver *driver);
1676 
1677 static const struct usb_gadget_ops pxa_udc_ops = {
1678 	.get_frame	= pxa_udc_get_frame,
1679 	.wakeup		= pxa_udc_wakeup,
1680 	.pullup		= pxa_udc_pullup,
1681 	.vbus_session	= pxa_udc_vbus_session,
1682 	.vbus_draw	= pxa_udc_vbus_draw,
1683 	.start		= pxa27x_udc_start,
1684 	.stop		= pxa27x_udc_stop,
1685 };
1686 
1687 /**
1688  * udc_disable - disable udc device controller
1689  * @udc: udc device
1690  * Context: any
1691  *
1692  * Disables the udc device: disables clocks, udc interrupts, control endpoint
1693  * interrupts.
1694  */
1695 static void udc_disable(struct pxa_udc *udc)
1696 {
1697 	if (!udc->enabled)
1698 		return;
1699 
1700 	udc_writel(udc, UDCICR0, 0);
1701 	udc_writel(udc, UDCICR1, 0);
1702 
1703 	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1704 	clk_disable(udc->clk);
1705 
1706 	ep0_idle(udc);
1707 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1708 
1709 	udc->enabled = 0;
1710 }
1711 
1712 /**
1713  * udc_init_data - Initialize udc device data structures
1714  * @dev: udc device
1715  *
1716  * Initializes the gadget endpoint list and the endpoint locks. No action is
1717  * taken on the hardware.
1718  */
1719 static __init void udc_init_data(struct pxa_udc *dev)
1720 {
1721 	int i;
1722 	struct pxa_ep *ep;
1723 
1724 	/* device/ep0 records init */
1725 	INIT_LIST_HEAD(&dev->gadget.ep_list);
1726 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1727 	dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
1728 	ep0_idle(dev);
1729 
1730 	/* PXA endpoints init */
1731 	for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
1732 		ep = &dev->pxa_ep[i];
1733 
1734 		ep->enabled = is_ep0(ep);
1735 		INIT_LIST_HEAD(&ep->queue);
1736 		spin_lock_init(&ep->lock);
1737 	}
1738 
1739 	/* USB endpoints init */
1740 	for (i = 1; i < NR_USB_ENDPOINTS; i++)
1741 		list_add_tail(&dev->udc_usb_ep[i].usb_ep.ep_list,
1742 				&dev->gadget.ep_list);
1743 }
1744 
1745 /**
1746  * udc_enable - Enables the udc device
1747  * @udc: udc device
1748  *
1749  * Enables the udc device: enables clocks, udc interrupts, control endpoint
1750  * interrupts, sets usb as UDC client and sets up endpoints.
1751  */
1752 static void udc_enable(struct pxa_udc *udc)
1753 {
1754 	if (udc->enabled)
1755 		return;
1756 
1757 	udc_writel(udc, UDCICR0, 0);
1758 	udc_writel(udc, UDCICR1, 0);
1759 	udc_clear_mask_UDCCR(udc, UDCCR_UDE);
1760 
1761 	clk_enable(udc->clk);
1762 
1763 	ep0_idle(udc);
1764 	udc->gadget.speed = USB_SPEED_FULL;
1765 	memset(&udc->stats, 0, sizeof(udc->stats));
1766 
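	/* Enable the controller; EMCE flags an endpoint memory configuration
	 * error if the static endpoint setup does not fit the UDC's memory. */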
1767 	udc_set_mask_UDCCR(udc, UDCCR_UDE);
1768 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
1769 	udelay(2);
1770 	if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
1771 		dev_err(udc->dev, "Configuration errors, udc disabled\n");
1772 
1773 	/*
1774 	 * Caller must be able to sleep in order to cope with startup transients
1775 	 */
1776 	msleep(100);
1777 
1778 	/* enable suspend/resume and reset irqs */
1779 	udc_writel(udc, UDCICR1,
1780 			UDCICR1_IECC | UDCICR1_IERU
1781 			| UDCICR1_IESU | UDCICR1_IERS);
1782 
1783 	/* enable ep0 irqs */
1784 	pio_irq_enable(&udc->pxa_ep[0]);
1785 
1786 	udc->enabled = 1;
1787 }
1788 
1789 /**
1790  * pxa27x_udc_start - Register gadget driver
1791  * @driver: gadget driver
1792  * @bind: bind function
1793  *
1794  * When a driver is successfully registered, it will receive control requests
1795  * including set_configuration(), which enables non-control requests.  Then
1796  * usb traffic follows until a disconnect is reported.  Then a host may connect
1797  * again, or the driver might get unbound.
1798  *
1799  * Note that the udc is not automatically enabled. Check function
1800  * should_enable_udc().
1801  *
1802  * Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
1803  */
1804 static int pxa27x_udc_start(struct usb_gadget_driver *driver,
1805 		int (*bind)(struct usb_gadget *))
1806 {
1807 	struct pxa_udc *udc = the_controller;
1808 	int retval;
1809 
1810 	if (!driver || driver->max_speed < USB_SPEED_FULL || !bind
1811 			|| !driver->disconnect || !driver->setup)
1812 		return -EINVAL;
1813 	if (!udc)
1814 		return -ENODEV;
1815 	if (udc->driver)
1816 		return -EBUSY;
1817 
1818 	/* first hook up the driver ... */
1819 	udc->driver = driver;
1820 	udc->gadget.dev.driver = &driver->driver;
1821 	dplus_pullup(udc, 1);
1822 
1823 	retval = device_add(&udc->gadget.dev);
1824 	if (retval) {
1825 		dev_err(udc->dev, "device_add error %d\n", retval);
1826 		goto add_fail;
1827 	}
1828 	retval = bind(&udc->gadget);
1829 	if (retval) {
1830 		dev_err(udc->dev, "bind to driver %s --> error %d\n",
1831 			driver->driver.name, retval);
1832 		goto bind_fail;
1833 	}
1834 	dev_dbg(udc->dev, "registered gadget driver '%s'\n",
1835 		driver->driver.name);
1836 
1837 	if (udc->transceiver) {
1838 		retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
1839 		if (retval) {
1840 			dev_err(udc->dev, "can't bind to transceiver\n");
1841 			goto transceiver_fail;
1842 		}
1843 	}
1844 
1845 	if (should_enable_udc(udc))
1846 		udc_enable(udc);
1847 	return 0;
1848 
1849 transceiver_fail:
1850 	if (driver->unbind)
1851 		driver->unbind(&udc->gadget);
1852 bind_fail:
1853 	device_del(&udc->gadget.dev);
1854 add_fail:
1855 	udc->driver = NULL;
1856 	udc->gadget.dev.driver = NULL;
1857 	return retval;
1858 }
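
/*
 * Illustration only (not part of this driver): with the two-argument gadget
 * registration API of this kernel generation, a gadget driver is expected to
 * reach pxa27x_udc_start() through the gadget core, roughly as
 *
 *	ret = usb_gadget_probe_driver(&my_gadget_driver, my_bind);
 *
 * where my_gadget_driver and my_bind are hypothetical gadget-side symbols.
 */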
1859 
1860 /**
1861  * stop_activity - Stops udc endpoints
1862  * @udc: udc device
1863  * @driver: gadget driver
1864  *
1865  * Disables all udc endpoints (even the control endpoint), and reports the
1866  * disconnect to the gadget user.
1867  */
1868 static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
1869 {
1870 	int i;
1871 
1872 	/* don't disconnect drivers more than once */
1873 	if (udc->gadget.speed == USB_SPEED_UNKNOWN)
1874 		driver = NULL;
1875 	udc->gadget.speed = USB_SPEED_UNKNOWN;
1876 
1877 	for (i = 0; i < NR_USB_ENDPOINTS; i++)
1878 		pxa_ep_disable(&udc->udc_usb_ep[i].usb_ep);
1879 
1880 	if (driver)
1881 		driver->disconnect(&udc->gadget);
1882 }
1883 
1884 /**
1885  * pxa27x_udc_stop - Unregister the gadget driver
1886  * @driver: gadget driver
1887  *
1888  * Returns 0 if no error, -ENODEV, -EINVAL otherwise
1889  */
1890 static int pxa27x_udc_stop(struct usb_gadget_driver *driver)
1891 {
1892 	struct pxa_udc *udc = the_controller;
1893 
1894 	if (!udc)
1895 		return -ENODEV;
1896 	if (!driver || driver != udc->driver || !driver->unbind)
1897 		return -EINVAL;
1898 
1899 	stop_activity(udc, driver);
1900 	udc_disable(udc);
1901 	dplus_pullup(udc, 0);
1902 
1903 	driver->unbind(&udc->gadget);
1904 	udc->driver = NULL;
1905 
1906 	device_del(&udc->gadget.dev);
1907 	dev_info(udc->dev, "unregistered gadget driver '%s'\n",
1908 		 driver->driver.name);
1909 
1910 	if (udc->transceiver)
1911 		return otg_set_peripheral(udc->transceiver, NULL);
1912 	return 0;
1913 }
1914 
1915 /**
1916  * handle_ep0_ctrl_req - handle control endpoint control request
1917  * @udc: udc device
1918  * @req: control request
1919  */
1920 static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1921 				struct pxa27x_request *req)
1922 {
1923 	struct pxa_ep *ep = &udc->pxa_ep[0];
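	/* the 8 byte SETUP packet is read from the fifo as two 32 bit words */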
1924 	union {
1925 		struct usb_ctrlrequest	r;
1926 		u32			word[2];
1927 	} u;
1928 	int i;
1929 	int have_extrabytes = 0;
1930 	unsigned long flags;
1931 
1932 	nuke(ep, -EPROTO);
1933 	spin_lock_irqsave(&ep->lock, flags);
1934 
1935 	/*
1936 	 * The PXA320 manual, in the section about Back-to-Back setup
1937 	 * packets, describes this situation.  The solution is to set OPC to
1938 	 * get rid of the status packet, and then continue with the setup
1939 	 * packet. Generalize this to pxa27x CPUs.
1940 	 */
1941 	if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
1942 		ep_write_UDCCSR(ep, UDCCSR0_OPC);
1943 
1944 	/* read SETUP packet */
1945 	for (i = 0; i < 2; i++) {
1946 		if (unlikely(ep_is_empty(ep)))
1947 			goto stall;
1948 		u.word[i] = udc_ep_readl(ep, UDCDR);
1949 	}
1950 
1951 	have_extrabytes = !ep_is_empty(ep);
1952 	while (!ep_is_empty(ep)) {
1953 		i = udc_ep_readl(ep, UDCDR);
1954 		ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
1955 	}
1956 
1957 	ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1958 		u.r.bRequestType, u.r.bRequest,
1959 		le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
1960 		le16_to_cpu(u.r.wLength));
1961 	if (unlikely(have_extrabytes))
1962 		goto stall;
1963 
1964 	if (u.r.bRequestType & USB_DIR_IN)
1965 		set_ep0state(udc, IN_DATA_STAGE);
1966 	else
1967 		set_ep0state(udc, OUT_DATA_STAGE);
1968 
1969 	/* Tell UDC to enter Data Stage */
1970 	ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
1971 
1972 	spin_unlock_irqrestore(&ep->lock, flags);
1973 	i = udc->driver->setup(&udc->gadget, &u.r);
1974 	spin_lock_irqsave(&ep->lock, flags);
1975 	if (i < 0)
1976 		goto stall;
1977 out:
1978 	spin_unlock_irqrestore(&ep->lock, flags);
1979 	return;
1980 stall:
1981 	ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
1982 		udc_ep_readl(ep, UDCCSR), i);
1983 	ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
1984 	set_ep0state(udc, STALL);
1985 	goto out;
1986 }
1987 
1988 /**
1989  * handle_ep0 - Handle control endpoint data transfers
1990  * @udc: udc device
1991  * @fifo_irq: 1 if triggered by fifo service type irq
1992  * @opc_irq: 1 if triggered by output packet complete type irq
1993  *
1994  * Context : when in_interrupt() or with ep->lock held
1995  *
1996  * Tries to transfer all pending request data into the endpoint and/or
1997  * transfer all pending data in the endpoint into usb requests.
1998  * Handles states of ep0 automata.
1999  *
2000  * PXA27x hardware handles several standard usb control requests without
2001  * driver notification.  The requests fully handled by hardware are :
2002  *  SET_ADDRESS, SET_FEATURE, CLEAR_FEATURE, GET_CONFIGURATION, GET_INTERFACE,
2003  *  GET_STATUS
2004  * The requests handled by hardware, but with irq notification are :
2005  *  SYNCH_FRAME, SET_CONFIGURATION, SET_INTERFACE
2006  * The remaining standard requests really handled by handle_ep0 are :
2007  *  GET_DESCRIPTOR, SET_DESCRIPTOR, specific requests.
2008  * Requests standardized outside of USB 2.0 chapter 9 are handled more
2009  * uniformly, by gadget drivers.
2010  *
2011  * The control endpoint state machine is _not_ USB spec compliant, it's even
2012  * hardly compliant with the Intel PXA270 developers guide.
2013  * The key points which shaped this state machine are:
2014  *   - on every setup token, bit UDCCSR0_SA is raised and held until cleared by
2015  *     software.
2016  *   - on every OUT packet received, UDCCSR0_OPC is raised and held until
2017  *     cleared by software.
2018  *   - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
2019  *     before reading ep0.
2020  *     This is true only for PXA27x. This is no longer true for the PXA3xx
2021  *     family (check Back-to-Back setup packets in the developers guide).
2022  *   - irq can be called on a "packet complete" event (opc_irq=1), while
2023  *     UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
2024  *     from experimentation).
2025  *   - as UDCCSR0_SA can be activated while in irq handling, and clearing
2026  *     UDCCSR0_OPC would flush the setup data, we almost never clear UDCCSR0_OPC
2027  *     => we never actually read the "status stage" packet of an IN data stage
2028  *     => this is not documented in Intel documentation
2029  *   - the hardware has no idea of a STATUS STAGE, it only handles the SETUP
2030  *     STAGE and the DATA STAGE. The driver adds a STATUS STAGE to send the
2031  *     last zero length packet in OUT_STATUS_STAGE.
2032  *   - special attention was needed for IN_STATUS_STAGE. If a packet complete
2033  *     event is detected, we terminate the status stage without acknowledging
2034  *     the packet (so as not to risk losing a potential SETUP packet)
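 *
 *   As an illustration (inferred from the state names, not from the manual),
 *   a control-IN transfer such as GET_DESCRIPTOR is expected to walk
 *   WAIT_FOR_SETUP -> SETUP_STAGE -> IN_DATA_STAGE -> IN_STATUS_STAGE and
 *   back to WAIT_FOR_SETUP.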
2035  */
2036 static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
2037 {
2038 	u32			udccsr0;
2039 	struct pxa_ep		*ep = &udc->pxa_ep[0];
2040 	struct pxa27x_request	*req = NULL;
2041 	int			completed = 0;
2042 
2043 	if (!list_empty(&ep->queue))
2044 		req = list_entry(ep->queue.next, struct pxa27x_request, queue);
2045 
2046 	udccsr0 = udc_ep_readl(ep, UDCCSR);
2047 	ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n",
2048 		EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR),
2049 		(fifo_irq << 1 | opc_irq));
2050 
2051 	if (udccsr0 & UDCCSR0_SST) {
2052 		ep_dbg(ep, "clearing stall status\n");
2053 		nuke(ep, -EPIPE);
2054 		ep_write_UDCCSR(ep, UDCCSR0_SST);
2055 		ep0_idle(udc);
2056 	}
2057 
2058 	if (udccsr0 & UDCCSR0_SA) {
2059 		nuke(ep, 0);
2060 		set_ep0state(udc, SETUP_STAGE);
2061 	}
2062 
2063 	switch (udc->ep0state) {
2064 	case WAIT_FOR_SETUP:
2065 		/*
2066 		 * Hardware bug : beware, we cannot clear OPC, since we would
2067 		 * miss a potential OPC irq for a setup packet.
2068 		 * So, we only do ... nothing, and hope for a next irq with
2069 		 * UDCCSR0_SA set.
2070 		 */
2071 		break;
2072 	case SETUP_STAGE:
2073 		udccsr0 &= UDCCSR0_CTRL_REQ_MASK;
2074 		if (likely(udccsr0 == UDCCSR0_CTRL_REQ_MASK))
2075 			handle_ep0_ctrl_req(udc, req);
2076 		break;
2077 	case IN_DATA_STAGE:			/* GET_DESCRIPTOR */
2078 		if (epout_has_pkt(ep))
2079 			ep_write_UDCCSR(ep, UDCCSR0_OPC);
2080 		if (req && !ep_is_full(ep))
2081 			completed = write_ep0_fifo(ep, req);
2082 		if (completed)
2083 			ep0_end_in_req(ep, req, NULL);
2084 		break;
2085 	case OUT_DATA_STAGE:			/* SET_DESCRIPTOR */
2086 		if (epout_has_pkt(ep) && req)
2087 			completed = read_ep0_fifo(ep, req);
2088 		if (completed)
2089 			ep0_end_out_req(ep, req, NULL);
2090 		break;
2091 	case STALL:
2092 		ep_write_UDCCSR(ep, UDCCSR0_FST);
2093 		break;
2094 	case IN_STATUS_STAGE:
2095 		/*
2096 		 * Hardware bug : beware, we cannot clear OPC, since we would
2097 		 * miss a potential PC irq for a setup packet.
2098 		 * So, we only put the ep0 into WAIT_FOR_SETUP state.
2099 		 */
2100 		if (opc_irq)
2101 			ep0_idle(udc);
2102 		break;
2103 	case OUT_STATUS_STAGE:
2104 	case WAIT_ACK_SET_CONF_INTERF:
2105 		ep_warn(ep, "should never get in %s state here!!!\n",
2106 				EP0_STNAME(ep->dev));
2107 		ep0_idle(udc);
2108 		break;
2109 	}
2110 }
2111 
2112 /**
2113  * handle_ep - Handle endpoint data transfers
2114  * @ep: pxa physical endpoint
2115  *
2116  * Tries to transfer all pending request data into the endpoint and/or
2117  * transfer all pending data in the endpoint into usb requests.
2118  *
2119  * Is always called when in_interrupt() and with ep->lock released.
2120  */
2121 static void handle_ep(struct pxa_ep *ep)
2122 {
2123 	struct pxa27x_request	*req;
2124 	int completed;
2125 	u32 udccsr;
2126 	int is_in = ep->dir_in;
2127 	int loop = 0;
2128 	unsigned long		flags;
2129 
2130 	spin_lock_irqsave(&ep->lock, flags);
2131 	if (ep->in_handle_ep)
2132 		goto recursion_detected;
2133 	ep->in_handle_ep = 1;
2134 
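	/*
	 * Loop as long as a request completes, so that the next queued request
	 * can be started within the same interrupt.
	 */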
2135 	do {
2136 		completed = 0;
2137 		udccsr = udc_ep_readl(ep, UDCCSR);
2138 
2139 		if (likely(!list_empty(&ep->queue)))
2140 			req = list_entry(ep->queue.next,
2141 					struct pxa27x_request, queue);
2142 		else
2143 			req = NULL;
2144 
2145 		ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n",
2146 				req, udccsr, loop++);
2147 
2148 		if (unlikely(udccsr & (UDCCSR_SST | UDCCSR_TRN)))
2149 			udc_ep_writel(ep, UDCCSR,
2150 					udccsr & (UDCCSR_SST | UDCCSR_TRN));
2151 		if (!req)
2152 			break;
2153 
2154 		if (unlikely(is_in)) {
2155 			if (likely(!ep_is_full(ep)))
2156 				completed = write_fifo(ep, req);
2157 		} else {
2158 			if (likely(epout_has_pkt(ep)))
2159 				completed = read_fifo(ep, req);
2160 		}
2161 
2162 		if (completed) {
2163 			if (is_in)
2164 				ep_end_in_req(ep, req, &flags);
2165 			else
2166 				ep_end_out_req(ep, req, &flags);
2167 		}
2168 	} while (completed);
2169 
2170 	ep->in_handle_ep = 0;
2171 recursion_detected:
2172 	spin_unlock_irqrestore(&ep->lock, flags);
2173 }
2174 
2175 /**
2176  * pxa27x_change_configuration - Handle SET_CONF usb request notification
2177  * @udc: udc device
2178  * @config: usb configuration
2179  *
2180  * Posts the request to the upper level.
2181  * Doesn't use any pxa specific hardware configuration capabilities.
2182  */
2183 static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
2184 {
2185 	struct usb_ctrlrequest req;
2186 
2187 	dev_dbg(udc->dev, "config=%d\n", config);
2188 
2189 	udc->config = config;
2190 	udc->last_interface = 0;
2191 	udc->last_alternate = 0;
2192 
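	/*
	 * Forge an equivalent SET_CONFIGURATION request so the gadget driver
	 * still sees the configuration the host selected (the hardware handled
	 * the real request itself).
	 */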
2193 	req.bRequestType = 0;
2194 	req.bRequest = USB_REQ_SET_CONFIGURATION;
2195 	req.wValue = config;
2196 	req.wIndex = 0;
2197 	req.wLength = 0;
2198 
2199 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2200 	udc->driver->setup(&udc->gadget, &req);
2201 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2202 }
2203 
2204 /**
2205  * pxa27x_change_interface - Handle SET_INTERF usb request notification
2206  * @udc: udc device
2207  * @iface: interface number
2208  * @alt: alternate setting number
2209  *
2210  * Posts the request to the upper level.
2211  * Doesn't use any pxa specific hardware configuration capabilities.
2212  */
2213 static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
2214 {
2215 	struct usb_ctrlrequest  req;
2216 
2217 	dev_dbg(udc->dev, "interface=%d, alternate setting=%d\n", iface, alt);
2218 
2219 	udc->last_interface = iface;
2220 	udc->last_alternate = alt;
2221 
2222 	req.bRequestType = USB_RECIP_INTERFACE;
2223 	req.bRequest = USB_REQ_SET_INTERFACE;
2224 	req.wValue = alt;
2225 	req.wIndex = iface;
2226 	req.wLength = 0;
2227 
2228 	set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2229 	udc->driver->setup(&udc->gadget, &req);
2230 	ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2231 }
2232 
2233 /**
2234  * irq_handle_data - Handle data transfer
2235  * @irq: irq number
2236  * @udc: pxa_udc device structure
2237  *
2238  * Called from the irq handler, transfers data to or from endpoints and queues
2239  */
2240 static void irq_handle_data(int irq, struct pxa_udc *udc)
2241 {
2242 	int i;
2243 	struct pxa_ep *ep;
2244 	u32 udcisr0 = udc_readl(udc, UDCISR0) & UDCCISR0_EP_MASK;
2245 	u32 udcisr1 = udc_readl(udc, UDCISR1) & UDCCISR1_EP_MASK;
2246 
2247 	if (udcisr0 & UDCISR_INT_MASK) {
2248 		udc->pxa_ep[0].stats.irqs++;
2249 		udc_writel(udc, UDCISR0, UDCISR_INT(0, UDCISR_INT_MASK));
2250 		handle_ep0(udc, !!(udcisr0 & UDCICR_FIFOERR),
2251 				!!(udcisr0 & UDCICR_PKTCOMPL));
2252 	}
2253 
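	/*
	 * Each endpoint owns two adjacent interrupt bits (fifo error, packet
	 * complete) in UDCISR0/UDCISR1; ep0 was handled above, so scan from
	 * ep1 upwards.
	 */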
2254 	udcisr0 >>= 2;
2255 	for (i = 1; udcisr0 != 0 && i < 16; udcisr0 >>= 2, i++) {
2256 		if (!(udcisr0 & UDCISR_INT_MASK))
2257 			continue;
2258 
2259 		udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
2260 
2261 		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2262 		if (i < ARRAY_SIZE(udc->pxa_ep)) {
2263 			ep = &udc->pxa_ep[i];
2264 			ep->stats.irqs++;
2265 			handle_ep(ep);
2266 		}
2267 	}
2268 
2269 	for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
2270 		udc_writel(udc, UDCISR1, UDCISR_INT(i - 16, UDCISR_INT_MASK));
2271 		if (!(udcisr1 & UDCISR_INT_MASK))
2272 			continue;
2273 
2274 		WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2275 		if (i < ARRAY_SIZE(udc->pxa_ep)) {
2276 			ep = &udc->pxa_ep[i];
2277 			ep->stats.irqs++;
2278 			handle_ep(ep);
2279 		}
2280 	}
2281 
2282 }
2283 
2284 /**
2285  * irq_udc_suspend - Handle IRQ "UDC Suspend"
2286  * @udc: udc device
2287  */
2288 static void irq_udc_suspend(struct pxa_udc *udc)
2289 {
2290 	udc_writel(udc, UDCISR1, UDCISR1_IRSU);
2291 	udc->stats.irqs_suspend++;
2292 
2293 	if (udc->gadget.speed != USB_SPEED_UNKNOWN
2294 			&& udc->driver && udc->driver->suspend)
2295 		udc->driver->suspend(&udc->gadget);
2296 	ep0_idle(udc);
2297 }
2298 
2299 /**
2300  * irq_udc_resume - Handle IRQ "UDC Resume"
2301  * @udc: udc device
2302  */
2303 static void irq_udc_resume(struct pxa_udc *udc)
2304 {
2305 	udc_writel(udc, UDCISR1, UDCISR1_IRRU);
2306 	udc->stats.irqs_resume++;
2307 
2308 	if (udc->gadget.speed != USB_SPEED_UNKNOWN
2309 			&& udc->driver && udc->driver->resume)
2310 		udc->driver->resume(&udc->gadget);
2311 }
2312 
2313 /**
2314  * irq_udc_reconfig - Handle IRQ "UDC Change Configuration"
2315  * @udc: udc device
2316  */
2317 static void irq_udc_reconfig(struct pxa_udc *udc)
2318 {
2319 	unsigned config, interface, alternate, config_change;
2320 	u32 udccr = udc_readl(udc, UDCCR);
2321 
2322 	udc_writel(udc, UDCISR1, UDCISR1_IRCC);
2323 	udc->stats.irqs_reconfig++;
2324 
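	/*
	 * UDCCR latches the configuration, interface and alternate setting
	 * selected by the host.
	 */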
2325 	config = (udccr & UDCCR_ACN) >> UDCCR_ACN_S;
2326 	config_change = (config != udc->config);
2327 	pxa27x_change_configuration(udc, config);
2328 
2329 	interface = (udccr & UDCCR_AIN) >> UDCCR_AIN_S;
2330 	alternate = (udccr & UDCCR_AAISN) >> UDCCR_AAISN_S;
2331 	pxa27x_change_interface(udc, interface, alternate);
2332 
2333 	if (config_change)
2334 		update_pxa_ep_matches(udc);
2335 	udc_set_mask_UDCCR(udc, UDCCR_SMAC);
2336 }
2337 
2338 /**
2339  * irq_udc_reset - Handle IRQ "UDC Reset"
2340  * @udc: udc device
2341  */
2342 static void irq_udc_reset(struct pxa_udc *udc)
2343 {
2344 	u32 udccr = udc_readl(udc, UDCCR);
2345 	struct pxa_ep *ep = &udc->pxa_ep[0];
2346 
2347 	dev_info(udc->dev, "USB reset\n");
2348 	udc_writel(udc, UDCISR1, UDCISR1_IRRS);
2349 	udc->stats.irqs_reset++;
2350 
2351 	if ((udccr & UDCCR_UDA) == 0) {
2352 		dev_dbg(udc->dev, "USB reset start\n");
2353 		stop_activity(udc, udc->driver);
2354 	}
2355 	udc->gadget.speed = USB_SPEED_FULL;
2356 	memset(&udc->stats, 0, sizeof udc->stats);
2357 
2358 	nuke(ep, -EPROTO);
2359 	ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
2360 	ep0_idle(udc);
2361 }
2362 
2363 /**
2364  * pxa_udc_irq - Main irq handler
2365  * @irq: irq number
2366  * @_dev: udc device
2367  *
2368  * Handles all udc interrupts
2369  */
2370 static irqreturn_t pxa_udc_irq(int irq, void *_dev)
2371 {
2372 	struct pxa_udc *udc = _dev;
2373 	u32 udcisr0 = udc_readl(udc, UDCISR0);
2374 	u32 udcisr1 = udc_readl(udc, UDCISR1);
2375 	u32 udccr = udc_readl(udc, UDCCR);
2376 	u32 udcisr1_spec;
2377 
2378 	dev_vdbg(udc->dev, "Interrupt, UDCISR0:0x%08x, UDCISR1:0x%08x, "
2379 		 "UDCCR:0x%08x\n", udcisr0, udcisr1, udccr);
2380 
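	/*
	 * The top bits of UDCISR1 carry the device-level events (suspend,
	 * resume, configuration change and reset).
	 */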
2381 	udcisr1_spec = udcisr1 & 0xf8000000;
2382 	if (unlikely(udcisr1_spec & UDCISR1_IRSU))
2383 		irq_udc_suspend(udc);
2384 	if (unlikely(udcisr1_spec & UDCISR1_IRRU))
2385 		irq_udc_resume(udc);
2386 	if (unlikely(udcisr1_spec & UDCISR1_IRCC))
2387 		irq_udc_reconfig(udc);
2388 	if (unlikely(udcisr1_spec & UDCISR1_IRRS))
2389 		irq_udc_reset(udc);
2390 
2391 	if ((udcisr0 & UDCCISR0_EP_MASK) | (udcisr1 & UDCCISR1_EP_MASK))
2392 		irq_handle_data(irq, udc);
2393 
2394 	return IRQ_HANDLED;
2395 }
2396 
2397 static struct pxa_udc memory = {
2398 	.gadget = {
2399 		.ops		= &pxa_udc_ops,
2400 		.ep0		= &memory.udc_usb_ep[0].usb_ep,
2401 		.name		= driver_name,
2402 		.dev = {
2403 			.init_name	= "gadget",
2404 		},
2405 	},
2406 
2407 	.udc_usb_ep = {
2408 		USB_EP_CTRL,
2409 		USB_EP_OUT_BULK(1),
2410 		USB_EP_IN_BULK(2),
2411 		USB_EP_IN_ISO(3),
2412 		USB_EP_OUT_ISO(4),
2413 		USB_EP_IN_INT(5),
2414 	},
2415 
2416 	.pxa_ep = {
2417 		PXA_EP_CTRL,
2418 		/* Endpoints for gadget zero */
2419 		PXA_EP_OUT_BULK(1, 1, 3, 0, 0),
2420 		PXA_EP_IN_BULK(2,  2, 3, 0, 0),
2421 		/* Endpoints for ether gadget, file storage gadget */
2422 		PXA_EP_OUT_BULK(3, 1, 1, 0, 0),
2423 		PXA_EP_IN_BULK(4,  2, 1, 0, 0),
2424 		PXA_EP_IN_ISO(5,   3, 1, 0, 0),
2425 		PXA_EP_OUT_ISO(6,  4, 1, 0, 0),
2426 		PXA_EP_IN_INT(7,   5, 1, 0, 0),
2427 		/* Endpoints for RNDIS, serial */
2428 		PXA_EP_OUT_BULK(8, 1, 2, 0, 0),
2429 		PXA_EP_IN_BULK(9,  2, 2, 0, 0),
2430 		PXA_EP_IN_INT(10,  5, 2, 0, 0),
2431 		/*
2432 		 * All the following endpoints are only for completeness.  They
2433 		 * will never work, as multiple interfaces are really broken on
2434 		 * the pxa.
2435 		 */
2436 		PXA_EP_OUT_BULK(11, 1, 2, 1, 0),
2437 		PXA_EP_IN_BULK(12,  2, 2, 1, 0),
2438 		/* Endpoint for CDC Ether */
2439 		PXA_EP_OUT_BULK(13, 1, 1, 1, 1),
2440 		PXA_EP_IN_BULK(14,  2, 1, 1, 1),
2441 	}
2442 };
2443 
2444 /**
2445  * pxa_udc_probe - probes the udc device
2446  * @pdev: platform device
2447  *
2448  * Performs basic init: allocates the udc clock, creates debugfs files,
2449  * requests the irq.
2450  */
2451 static int __init pxa_udc_probe(struct platform_device *pdev)
2452 {
2453 	struct resource *regs;
2454 	struct pxa_udc *udc = &memory;
2455 	int retval = 0, gpio;
2456 
2457 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2458 	if (!regs)
2459 		return -ENXIO;
2460 	udc->irq = platform_get_irq(pdev, 0);
2461 	if (udc->irq < 0)
2462 		return udc->irq;
2463 
2464 	udc->dev = &pdev->dev;
2465 	udc->mach = pdev->dev.platform_data;
2466 	udc->transceiver = otg_get_transceiver();
2467 
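	/* optional board-provided GPIO driving the D+ pull-up */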
2468 	gpio = udc->mach->gpio_pullup;
2469 	if (gpio_is_valid(gpio)) {
2470 		retval = gpio_request(gpio, "USB D+ pullup");
2471 		if (retval == 0)
2472 			gpio_direction_output(gpio,
2473 				       udc->mach->gpio_pullup_inverted);
2474 	}
2475 	if (retval) {
2476 		dev_err(&pdev->dev, "Couldn't request gpio %d : %d\n",
2477 			gpio, retval);
2478 		return retval;
2479 	}
2480 
2481 	udc->clk = clk_get(&pdev->dev, NULL);
2482 	if (IS_ERR(udc->clk)) {
2483 		retval = PTR_ERR(udc->clk);
2484 		goto err_clk;
2485 	}
2486 
2487 	retval = -ENOMEM;
2488 	udc->regs = ioremap(regs->start, resource_size(regs));
2489 	if (!udc->regs) {
2490 		dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
2491 		goto err_map;
2492 	}
2493 
2494 	device_initialize(&udc->gadget.dev);
2495 	udc->gadget.dev.parent = &pdev->dev;
2496 	udc->gadget.dev.dma_mask = NULL;
2497 	udc->vbus_sensed = 0;
2498 
2499 	the_controller = udc;
2500 	platform_set_drvdata(pdev, udc);
2501 	udc_init_data(udc);
2502 	pxa_eps_setup(udc);
2503 
2504 	/* irq setup after old hardware state is cleaned up */
2505 	retval = request_irq(udc->irq, pxa_udc_irq,
2506 			IRQF_SHARED, driver_name, udc);
2507 	if (retval != 0) {
2508 		dev_err(udc->dev, "%s: can't get irq %i, err %d\n",
2509 			driver_name, udc->irq, retval);
2510 		goto err_irq;
2511 	}
2512 	retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
2513 	if (retval)
2514 		goto err_add_udc;
2515 
2516 	pxa_init_debugfs(udc);
2517 	return 0;
2518 err_add_udc:
2519 	free_irq(udc->irq, udc);
2520 err_irq:
2521 	iounmap(udc->regs);
2522 err_map:
2523 	clk_put(udc->clk);
2524 	udc->clk = NULL;
2525 err_clk:
2526 	return retval;
2527 }
2528 
2529 /**
2530  * pxa_udc_remove - removes the udc device driver
2531  * @_dev: platform device
2532  */
2533 static int __exit pxa_udc_remove(struct platform_device *_dev)
2534 {
2535 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2536 	int gpio = udc->mach->gpio_pullup;
2537 
2538 	usb_del_gadget_udc(&udc->gadget);
2539 	usb_gadget_unregister_driver(udc->driver);
2540 	free_irq(udc->irq, udc);
2541 	pxa_cleanup_debugfs(udc);
2542 	if (gpio_is_valid(gpio))
2543 		gpio_free(gpio);
2544 
2545 	otg_put_transceiver(udc->transceiver);
2546 
2547 	udc->transceiver = NULL;
2548 	platform_set_drvdata(_dev, NULL);
2549 	the_controller = NULL;
2550 	clk_put(udc->clk);
2551 	iounmap(udc->regs);
2552 
2553 	return 0;
2554 }
2555 
2556 static void pxa_udc_shutdown(struct platform_device *_dev)
2557 {
2558 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2559 
2560 	if (udc_readl(udc, UDCCR) & UDCCR_UDE)
2561 		udc_disable(udc);
2562 }
2563 
2564 #ifdef CONFIG_PXA27x
2565 extern void pxa27x_clear_otgph(void);
2566 #else
2567 #define pxa27x_clear_otgph()   do {} while (0)
2568 #endif
2569 
2570 #ifdef CONFIG_PM
2571 /**
2572  * pxa_udc_suspend - Suspend udc device
2573  * @_dev: platform device
2574  * @state: suspend state
2575  *
2576  * Suspends the udc: saves configuration registers (UDCCR*), then disables the
2577  * udc device.
2578  */
2579 static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
2580 {
2581 	int i;
2582 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2583 	struct pxa_ep *ep;
2584 
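	/* save per-endpoint control and status registers so resume can restore them */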
2585 	ep = &udc->pxa_ep[0];
2586 	udc->udccsr0 = udc_ep_readl(ep, UDCCSR);
2587 	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
2588 		ep = &udc->pxa_ep[i];
2589 		ep->udccsr_value = udc_ep_readl(ep, UDCCSR);
2590 		ep->udccr_value  = udc_ep_readl(ep, UDCCR);
2591 		ep_dbg(ep, "udccsr:0x%03x, udccr:0x%x\n",
2592 				ep->udccsr_value, ep->udccr_value);
2593 	}
2594 
2595 	udc_disable(udc);
2596 	udc->pullup_resume = udc->pullup_on;
2597 	dplus_pullup(udc, 0);
2598 
2599 	return 0;
2600 }
2601 
2602 /**
2603  * pxa_udc_resume - Resume udc device
2604  * @_dev: platform device
2605  *
2606  * Resumes the udc: restores configuration registers (UDCCR*), then enables the
2607  * udc device.
2608  */
2609 static int pxa_udc_resume(struct platform_device *_dev)
2610 {
2611 	int i;
2612 	struct pxa_udc *udc = platform_get_drvdata(_dev);
2613 	struct pxa_ep *ep;
2614 
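	/* restore what pxa_udc_suspend() saved; only FST and DME are put back for ep0 */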
2615 	ep = &udc->pxa_ep[0];
2616 	udc_ep_writel(ep, UDCCSR, udc->udccsr0 & (UDCCSR0_FST | UDCCSR0_DME));
2617 	for (i = 1; i < NR_PXA_ENDPOINTS; i++) {
2618 		ep = &udc->pxa_ep[i];
2619 		udc_ep_writel(ep, UDCCSR, ep->udccsr_value);
2620 		udc_ep_writel(ep, UDCCR,  ep->udccr_value);
2621 		ep_dbg(ep, "udccsr:0x%03x, udccr:0x%x\n",
2622 				ep->udccsr_value, ep->udccr_value);
2623 	}
2624 
2625 	dplus_pullup(udc, udc->pullup_resume);
2626 	if (should_enable_udc(udc))
2627 		udc_enable(udc);
2628 	/*
2629 	 * We do not handle OTG yet.
2630 	 *
2631 	 * OTGPH bit is set when sleep mode is entered.
2632 	 * it indicates that OTG pad is retaining its state.
2633 	 * Upon exit from sleep mode and before clearing OTGPH,
2634 	 * Software must configure the USB OTG pad, UDC, and UHC
2635 	 * to the state they were in before entering sleep mode.
2636 	 */
2637 	pxa27x_clear_otgph();
2638 
2639 	return 0;
2640 }
2641 #endif
2642 
2643 /* work with hotplug and coldplug */
2644 MODULE_ALIAS("platform:pxa27x-udc");
2645 
2646 static struct platform_driver udc_driver = {
2647 	.driver		= {
2648 		.name	= "pxa27x-udc",
2649 		.owner	= THIS_MODULE,
2650 	},
2651 	.remove		= __exit_p(pxa_udc_remove),
2652 	.shutdown	= pxa_udc_shutdown,
2653 #ifdef CONFIG_PM
2654 	.suspend	= pxa_udc_suspend,
2655 	.resume		= pxa_udc_resume
2656 #endif
2657 };
2658 
2659 static int __init udc_init(void)
2660 {
2661 	if (!cpu_is_pxa27x() && !cpu_is_pxa3xx())
2662 		return -ENODEV;
2663 
2664 	printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
2665 	return platform_driver_probe(&udc_driver, pxa_udc_probe);
2666 }
2667 module_init(udc_init);
2668 
2669 
2670 static void __exit udc_exit(void)
2671 {
2672 	platform_driver_unregister(&udc_driver);
2673 }
2674 module_exit(udc_exit);
2675 
2676 MODULE_DESCRIPTION(DRIVER_DESC);
2677 MODULE_AUTHOR("Robert Jarzmik");
2678 MODULE_LICENSE("GPL");
2679