1 /*
2  * Intel Langwell USB Device Controller driver
3  * Copyright (C) 2008-2009, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  */
9 
10 
11 /* #undef	DEBUG */
12 /* #undef	VERBOSE_DEBUG */
13 
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/kernel.h>
18 #include <linux/delay.h>
19 #include <linux/ioport.h>
20 #include <linux/sched.h>
21 #include <linux/slab.h>
22 #include <linux/errno.h>
23 #include <linux/init.h>
24 #include <linux/timer.h>
25 #include <linux/list.h>
26 #include <linux/interrupt.h>
27 #include <linux/moduleparam.h>
28 #include <linux/device.h>
29 #include <linux/usb/ch9.h>
30 #include <linux/usb/gadget.h>
31 #include <linux/usb/otg.h>
32 #include <linux/pm.h>
33 #include <linux/io.h>
34 #include <linux/irq.h>
35 #include <asm/system.h>
36 #include <asm/unaligned.h>
37 
38 #include "langwell_udc.h"
39 
40 
41 #define	DRIVER_DESC		"Intel Langwell USB Device Controller driver"
42 #define	DRIVER_VERSION		"16 May 2009"
43 
44 static const char driver_name[] = "langwell_udc";
45 static const char driver_desc[] = DRIVER_DESC;
46 
47 
48 /* for endpoint 0 operations */
49 static const struct usb_endpoint_descriptor
50 langwell_ep0_desc = {
51 	.bLength =		USB_DT_ENDPOINT_SIZE,
52 	.bDescriptorType =	USB_DT_ENDPOINT,
53 	.bEndpointAddress =	0,
54 	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
55 	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
56 };
57 
58 
59 /*-------------------------------------------------------------------------*/
60 /* debugging */
61 
62 #ifdef	VERBOSE_DEBUG
63 static inline void print_all_registers(struct langwell_udc *dev)
64 {
65 	int	i;
66 
67 	/* Capability Registers */
68 	dev_dbg(&dev->pdev->dev,
69 		"Capability Registers (offset: 0x%04x, length: 0x%08x)\n",
70 		CAP_REG_OFFSET, (u32)sizeof(struct langwell_cap_regs));
71 	dev_dbg(&dev->pdev->dev, "caplength=0x%02x\n",
72 			readb(&dev->cap_regs->caplength));
73 	dev_dbg(&dev->pdev->dev, "hciversion=0x%04x\n",
74 			readw(&dev->cap_regs->hciversion));
75 	dev_dbg(&dev->pdev->dev, "hcsparams=0x%08x\n",
76 			readl(&dev->cap_regs->hcsparams));
77 	dev_dbg(&dev->pdev->dev, "hccparams=0x%08x\n",
78 			readl(&dev->cap_regs->hccparams));
79 	dev_dbg(&dev->pdev->dev, "dciversion=0x%04x\n",
80 			readw(&dev->cap_regs->dciversion));
81 	dev_dbg(&dev->pdev->dev, "dccparams=0x%08x\n",
82 			readl(&dev->cap_regs->dccparams));
83 
84 	/* Operational Registers */
85 	dev_dbg(&dev->pdev->dev,
86 		"Operational Registers (offset: 0x%04x, length: 0x%08x)\n",
87 		OP_REG_OFFSET, (u32)sizeof(struct langwell_op_regs));
88 	dev_dbg(&dev->pdev->dev, "extsts=0x%08x\n",
89 			readl(&dev->op_regs->extsts));
90 	dev_dbg(&dev->pdev->dev, "extintr=0x%08x\n",
91 			readl(&dev->op_regs->extintr));
92 	dev_dbg(&dev->pdev->dev, "usbcmd=0x%08x\n",
93 			readl(&dev->op_regs->usbcmd));
94 	dev_dbg(&dev->pdev->dev, "usbsts=0x%08x\n",
95 			readl(&dev->op_regs->usbsts));
96 	dev_dbg(&dev->pdev->dev, "usbintr=0x%08x\n",
97 			readl(&dev->op_regs->usbintr));
98 	dev_dbg(&dev->pdev->dev, "frindex=0x%08x\n",
99 			readl(&dev->op_regs->frindex));
100 	dev_dbg(&dev->pdev->dev, "ctrldssegment=0x%08x\n",
101 			readl(&dev->op_regs->ctrldssegment));
102 	dev_dbg(&dev->pdev->dev, "deviceaddr=0x%08x\n",
103 			readl(&dev->op_regs->deviceaddr));
104 	dev_dbg(&dev->pdev->dev, "endpointlistaddr=0x%08x\n",
105 			readl(&dev->op_regs->endpointlistaddr));
106 	dev_dbg(&dev->pdev->dev, "ttctrl=0x%08x\n",
107 			readl(&dev->op_regs->ttctrl));
108 	dev_dbg(&dev->pdev->dev, "burstsize=0x%08x\n",
109 			readl(&dev->op_regs->burstsize));
110 	dev_dbg(&dev->pdev->dev, "txfilltuning=0x%08x\n",
111 			readl(&dev->op_regs->txfilltuning));
112 	dev_dbg(&dev->pdev->dev, "txttfilltuning=0x%08x\n",
113 			readl(&dev->op_regs->txttfilltuning));
114 	dev_dbg(&dev->pdev->dev, "ic_usb=0x%08x\n",
115 			readl(&dev->op_regs->ic_usb));
116 	dev_dbg(&dev->pdev->dev, "ulpi_viewport=0x%08x\n",
117 			readl(&dev->op_regs->ulpi_viewport));
118 	dev_dbg(&dev->pdev->dev, "configflag=0x%08x\n",
119 			readl(&dev->op_regs->configflag));
120 	dev_dbg(&dev->pdev->dev, "portsc1=0x%08x\n",
121 			readl(&dev->op_regs->portsc1));
122 	dev_dbg(&dev->pdev->dev, "devlc=0x%08x\n",
123 			readl(&dev->op_regs->devlc));
124 	dev_dbg(&dev->pdev->dev, "otgsc=0x%08x\n",
125 			readl(&dev->op_regs->otgsc));
126 	dev_dbg(&dev->pdev->dev, "usbmode=0x%08x\n",
127 			readl(&dev->op_regs->usbmode));
128 	dev_dbg(&dev->pdev->dev, "endptnak=0x%08x\n",
129 			readl(&dev->op_regs->endptnak));
130 	dev_dbg(&dev->pdev->dev, "endptnaken=0x%08x\n",
131 			readl(&dev->op_regs->endptnaken));
132 	dev_dbg(&dev->pdev->dev, "endptsetupstat=0x%08x\n",
133 			readl(&dev->op_regs->endptsetupstat));
134 	dev_dbg(&dev->pdev->dev, "endptprime=0x%08x\n",
135 			readl(&dev->op_regs->endptprime));
136 	dev_dbg(&dev->pdev->dev, "endptflush=0x%08x\n",
137 			readl(&dev->op_regs->endptflush));
138 	dev_dbg(&dev->pdev->dev, "endptstat=0x%08x\n",
139 			readl(&dev->op_regs->endptstat));
140 	dev_dbg(&dev->pdev->dev, "endptcomplete=0x%08x\n",
141 			readl(&dev->op_regs->endptcomplete));
142 
143 	for (i = 0; i < dev->ep_max / 2; i++) {
144 		dev_dbg(&dev->pdev->dev, "endptctrl[%d]=0x%08x\n",
145 				i, readl(&dev->op_regs->endptctrl[i]));
146 	}
147 }
148 #else
149 
150 #define	print_all_registers(dev)	do { } while (0)
151 
152 #endif /* VERBOSE_DEBUG */
153 
154 
155 /*-------------------------------------------------------------------------*/
156 
157 #define	is_in(ep)	(((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir ==	\
158 			USB_DIR_IN) : (usb_endpoint_dir_in((ep)->desc)))
159 
160 #define	DIR_STRING(ep)	(is_in(ep) ? "in" : "out")
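/*
 * Note on is_in()/DIR_STRING(): ep0 has no direction bit of its own, so the
 * current direction of the control transfer is tracked in dev->ep0_dir and
 * consulted instead of the descriptor; every other endpoint takes its
 * direction from bEndpointAddress in its descriptor.
 */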
161 
162 
163 static char *type_string(const struct usb_endpoint_descriptor *desc)
164 {
165 	switch (usb_endpoint_type(desc)) {
166 	case USB_ENDPOINT_XFER_BULK:
167 		return "bulk";
168 	case USB_ENDPOINT_XFER_ISOC:
169 		return "iso";
170 	case USB_ENDPOINT_XFER_INT:
171 		return "int";
172 	}
173 
174 	return "control";
175 }
176 
177 
178 /* configure endpoint control registers */
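/*
 * Each ENDPTCTRLx register holds the RX (OUT) controls in its low half and
 * the TX (IN) controls in its high half.  Besides enabling the endpoint and
 * setting its transfer type, the data toggle is reset (EPCTRL_TXR/RXR) for
 * every endpoint except ep0.
 */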
179 static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
180 		unsigned char is_in, unsigned char ep_type)
181 {
182 	struct langwell_udc	*dev;
183 	u32			endptctrl;
184 
185 	dev = ep->dev;
186 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
187 
188 	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
189 	if (is_in) {	/* TX */
190 		if (ep_num)
191 			endptctrl |= EPCTRL_TXR;
192 		endptctrl |= EPCTRL_TXE;
193 		endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
194 	} else {	/* RX */
195 		if (ep_num)
196 			endptctrl |= EPCTRL_RXR;
197 		endptctrl |= EPCTRL_RXE;
198 		endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
199 	}
200 
201 	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
202 
203 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
204 }
205 
206 
207 /* reset ep0 dQH and endptctrl */
208 static void ep0_reset(struct langwell_udc *dev)
209 {
210 	struct langwell_ep	*ep;
211 	int			i;
212 
213 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
214 
215 	/* ep0 in and out */
216 	for (i = 0; i < 2; i++) {
217 		ep = &dev->ep[i];
218 		ep->dev = dev;
219 
220 		/* ep0 dQH */
221 		ep->dqh = &dev->ep_dqh[i];
222 
223 		/* configure ep0 endpoint capabilities in dQH */
224 		ep->dqh->dqh_ios = 1;
225 		ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
226 
227 		/* enable ep0-in HW zero length termination select */
228 		if (is_in(ep))
229 			ep->dqh->dqh_zlt = 0;
230 		ep->dqh->dqh_mult = 0;
231 
232 		ep->dqh->dtd_next = DTD_TERM;
233 
234 		/* configure ep0 control registers */
235 		ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
236 	}
237 
238 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
239 }
240 
241 
242 /*-------------------------------------------------------------------------*/
243 
244 /* endpoints operations */
245 
246 /* configure endpoint, making it usable */
247 static int langwell_ep_enable(struct usb_ep *_ep,
248 		const struct usb_endpoint_descriptor *desc)
249 {
250 	struct langwell_udc	*dev;
251 	struct langwell_ep	*ep;
252 	u16			max = 0;
253 	unsigned long		flags;
254 	int			i, retval = 0;
255 	unsigned char		zlt, ios = 0, mult = 0;
256 
257 	ep = container_of(_ep, struct langwell_ep, ep);
258 	dev = ep->dev;
259 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
260 
261 	if (!_ep || !desc || ep->desc
262 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
263 		return -EINVAL;
264 
265 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
266 		return -ESHUTDOWN;
267 
268 	max = usb_endpoint_maxp(desc);
269 
270 	/*
271 	 * disable HW zero length termination select
272 	 * driver handles zero length packet through req->req.zero
273 	 */
274 	zlt = 1;
275 
276 	/*
277 	 * sanity check type, direction, address, and then
278 	 * initialize the endpoint capabilities fields in dQH
279 	 */
280 	switch (usb_endpoint_type(desc)) {
281 	case USB_ENDPOINT_XFER_CONTROL:
282 		ios = 1;
283 		break;
284 	case USB_ENDPOINT_XFER_BULK:
285 		if ((dev->gadget.speed == USB_SPEED_HIGH
286 					&& max != 512)
287 				|| (dev->gadget.speed == USB_SPEED_FULL
288 					&& max > 64)) {
289 			goto done;
290 		}
291 		break;
292 	case USB_ENDPOINT_XFER_INT:
293 		if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
294 			goto done;
295 
296 		switch (dev->gadget.speed) {
297 		case USB_SPEED_HIGH:
298 			if (max <= 1024)
299 				break;
300 		case USB_SPEED_FULL:
301 			if (max <= 64)
302 				break;
303 		default:
304 			if (max <= 8)
305 				break;
306 			goto done;
307 		}
308 		break;
309 	case USB_ENDPOINT_XFER_ISOC:
310 		if (strstr(ep->ep.name, "-bulk")
311 				|| strstr(ep->ep.name, "-int"))
312 			goto done;
313 
314 		switch (dev->gadget.speed) {
315 		case USB_SPEED_HIGH:
316 			if (max <= 1024)
317 				break;
318 		case USB_SPEED_FULL:
319 			if (max <= 1023)
320 				break;
321 		default:
322 			goto done;
323 		}
324 		/*
325 		 * FIXME:
326 		 * calculate transactions needed for high bandwidth iso
327 		 */
328 		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
329 	max = max & 0x7ff;	/* bits 0~10 */
330 		/* 3 transactions at most */
331 		if (mult > 3)
332 			goto done;
333 		break;
334 	default:
335 		goto done;
336 	}
337 
338 	spin_lock_irqsave(&dev->lock, flags);
339 
340 	ep->ep.maxpacket = max;
341 	ep->desc = desc;
342 	ep->stopped = 0;
343 	ep->ep_num = usb_endpoint_num(desc);
344 
345 	/* ep_type */
346 	ep->ep_type = usb_endpoint_type(desc);
347 
348 	/* configure endpoint control registers */
349 	ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
350 
351 	/* configure endpoint capabilities in dQH */
352 	i = ep->ep_num * 2 + is_in(ep);
353 	ep->dqh = &dev->ep_dqh[i];
354 	ep->dqh->dqh_ios = ios;
355 	ep->dqh->dqh_mpl = cpu_to_le16(max);
356 	ep->dqh->dqh_zlt = zlt;
357 	ep->dqh->dqh_mult = mult;
358 	ep->dqh->dtd_next = DTD_TERM;
359 
360 	dev_dbg(&dev->pdev->dev, "enabled %s (ep%d%s-%s), max %04x\n",
361 			_ep->name,
362 			ep->ep_num,
363 			DIR_STRING(ep),
364 			type_string(desc),
365 			max);
366 
367 	spin_unlock_irqrestore(&dev->lock, flags);
368 done:
369 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
370 	return retval;
371 }
372 
373 
374 /*-------------------------------------------------------------------------*/
375 
376 /* retire a request */
377 static void done(struct langwell_ep *ep, struct langwell_request *req,
378 		int status)
379 {
380 	struct langwell_udc	*dev = ep->dev;
381 	unsigned		stopped = ep->stopped;
382 	struct langwell_dtd	*curr_dtd, *next_dtd;
383 	int			i;
384 
385 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
386 
387 	/* remove the req from ep->queue */
388 	list_del_init(&req->queue);
389 
390 	if (req->req.status == -EINPROGRESS)
391 		req->req.status = status;
392 	else
393 		status = req->req.status;
394 
395 	/* free dTD for the request */
396 	next_dtd = req->head;
397 	for (i = 0; i < req->dtd_count; i++) {
398 		curr_dtd = next_dtd;
399 		if (i != req->dtd_count - 1)
400 			next_dtd = curr_dtd->next_dtd_virt;
401 		dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
402 	}
403 
404 	if (req->mapped) {
405 		dma_unmap_single(&dev->pdev->dev,
406 			req->req.dma, req->req.length,
407 			is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
408 		req->req.dma = DMA_ADDR_INVALID;
409 		req->mapped = 0;
410 	} else
411 		dma_sync_single_for_cpu(&dev->pdev->dev, req->req.dma,
412 				req->req.length,
413 				is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
414 
415 	if (status != -ESHUTDOWN)
416 		dev_dbg(&dev->pdev->dev,
417 				"complete %s, req %p, stat %d, len %u/%u\n",
418 				ep->ep.name, &req->req, status,
419 				req->req.actual, req->req.length);
420 
421 	/* don't modify queue heads during completion callback */
422 	ep->stopped = 1;
423 
424 	spin_unlock(&dev->lock);
425 	/* complete routine from gadget driver */
426 	if (req->req.complete)
427 		req->req.complete(&ep->ep, &req->req);
428 
429 	spin_lock(&dev->lock);
430 	ep->stopped = stopped;
431 
432 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
433 }
434 
435 
436 static void langwell_ep_fifo_flush(struct usb_ep *_ep);
437 
438 /* delete all endpoint requests, called with spinlock held */
439 static void nuke(struct langwell_ep *ep, int status)
440 {
441 	/* called with spinlock held */
442 	ep->stopped = 1;
443 
444 	/* endpoint fifo flush */
445 	if (ep->desc)
446 		langwell_ep_fifo_flush(&ep->ep);
447 
448 	while (!list_empty(&ep->queue)) {
449 		struct langwell_request	*req = NULL;
450 		req = list_entry(ep->queue.next, struct langwell_request,
451 				queue);
452 		done(ep, req, status);
453 	}
454 }
455 
456 
457 /*-------------------------------------------------------------------------*/
458 
459 /* endpoint is no longer usable */
460 static int langwell_ep_disable(struct usb_ep *_ep)
461 {
462 	struct langwell_ep	*ep;
463 	unsigned long		flags;
464 	struct langwell_udc	*dev;
465 	int			ep_num;
466 	u32			endptctrl;
467 
468 	ep = container_of(_ep, struct langwell_ep, ep);
469 	dev = ep->dev;
470 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
471 
472 	if (!_ep || !ep->desc)
473 		return -EINVAL;
474 
475 	spin_lock_irqsave(&dev->lock, flags);
476 
477 	/* disable endpoint control register */
478 	ep_num = ep->ep_num;
479 	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
480 	if (is_in(ep))
481 		endptctrl &= ~EPCTRL_TXE;
482 	else
483 		endptctrl &= ~EPCTRL_RXE;
484 	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
485 
486 	/* nuke all pending requests (does flush) */
487 	nuke(ep, -ESHUTDOWN);
488 
489 	ep->desc = NULL;
490 	ep->stopped = 1;
491 
492 	spin_unlock_irqrestore(&dev->lock, flags);
493 
494 	dev_dbg(&dev->pdev->dev, "disabled %s\n", _ep->name);
495 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
496 
497 	return 0;
498 }
499 
500 
501 /* allocate a request object to use with this endpoint */
502 static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
503 		gfp_t gfp_flags)
504 {
505 	struct langwell_ep	*ep;
506 	struct langwell_udc	*dev;
507 	struct langwell_request	*req = NULL;
508 
509 	if (!_ep)
510 		return NULL;
511 
512 	ep = container_of(_ep, struct langwell_ep, ep);
513 	dev = ep->dev;
514 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
515 
516 	req = kzalloc(sizeof(*req), gfp_flags);
517 	if (!req)
518 		return NULL;
519 
520 	req->req.dma = DMA_ADDR_INVALID;
521 	INIT_LIST_HEAD(&req->queue);
522 
523 	dev_vdbg(&dev->pdev->dev, "alloc request for %s\n", _ep->name);
524 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
525 	return &req->req;
526 }
527 
528 
529 /* free a request object */
530 static void langwell_free_request(struct usb_ep *_ep,
531 		struct usb_request *_req)
532 {
533 	struct langwell_ep	*ep;
534 	struct langwell_udc	*dev;
535 	struct langwell_request	*req = NULL;
536 
537 	ep = container_of(_ep, struct langwell_ep, ep);
538 	dev = ep->dev;
539 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
540 
541 	if (!_ep || !_req)
542 		return;
543 
544 	req = container_of(_req, struct langwell_request, req);
545 	WARN_ON(!list_empty(&req->queue));
546 
547 	if (_req)
548 		kfree(req);
549 
550 	dev_vdbg(&dev->pdev->dev, "free request for %s\n", _ep->name);
551 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
552 }
553 
554 
555 /*-------------------------------------------------------------------------*/
556 
557 /* queue dTD and PRIME endpoint */
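/*
 * Two cases are handled below.  If the endpoint queue is empty, the dQH is
 * pointed at the first dTD of the request and the endpoint is primed by
 * writing its bit into ENDPTPRIME (OUT endpoints use bits 0-15, IN endpoints
 * bits 16-31).  If requests are already queued, the new chain is linked onto
 * the tail dTD and the ATDTW (add-dTD tripwire) bit in USBCMD is used to get
 * a consistent snapshot of ENDPTSTAT: if the endpoint is still active no
 * re-prime is needed, otherwise we fall through and prime it by hand.
 */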
558 static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
559 {
560 	u32			bit_mask, usbcmd, endptstat, dtd_dma;
561 	u8			dtd_status;
562 	int			i;
563 	struct langwell_dqh	*dqh;
564 	struct langwell_udc	*dev;
565 
566 	dev = ep->dev;
567 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
568 
569 	i = ep->ep_num * 2 + is_in(ep);
570 	dqh = &dev->ep_dqh[i];
571 
572 	if (ep->ep_num)
573 		dev_vdbg(&dev->pdev->dev, "%s\n", ep->name);
574 	else
575 		/* ep0 */
576 		dev_vdbg(&dev->pdev->dev, "%s-%s\n", ep->name, DIR_STRING(ep));
577 
578 	dev_vdbg(&dev->pdev->dev, "ep_dqh[%d] addr: 0x%p\n",
579 			i, &(dev->ep_dqh[i]));
580 
581 	bit_mask = is_in(ep) ?
582 		(1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
583 
584 	dev_vdbg(&dev->pdev->dev, "bit_mask = 0x%08x\n", bit_mask);
585 
586 	/* check if the pipe is empty */
587 	if (!(list_empty(&ep->queue))) {
588 		/* add dTD to the end of linked list */
589 		struct langwell_request	*lastreq;
590 		lastreq = list_entry(ep->queue.prev,
591 				struct langwell_request, queue);
592 
593 		lastreq->tail->dtd_next =
594 			cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);
595 
596 		/* read prime bit, if 1 goto out */
597 		if (readl(&dev->op_regs->endptprime) & bit_mask)
598 			goto out;
599 
600 		do {
601 			/* set ATDTW bit in USBCMD */
602 			usbcmd = readl(&dev->op_regs->usbcmd);
603 			writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);
604 
605 			/* read correct status bit */
606 			endptstat = readl(&dev->op_regs->endptstat) & bit_mask;
607 
608 		} while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));
609 
610 		/* write ATDTW bit to 0 */
611 		usbcmd = readl(&dev->op_regs->usbcmd);
612 		writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);
613 
614 		if (endptstat)
615 			goto out;
616 	}
617 
618 	/* write dQH next pointer and terminate bit to 0 */
619 	dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
620 	dqh->dtd_next = cpu_to_le32(dtd_dma);
621 
622 	/* clear active and halt bit */
623 	dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
624 	dqh->dtd_status &= dtd_status;
625 	dev_vdbg(&dev->pdev->dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
626 
627 	/* ensure that updates to the dQH will occur before priming */
628 	wmb();
629 
630 	/* write 1 to endptprime register to PRIME endpoint */
631 	bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
632 	dev_vdbg(&dev->pdev->dev, "endprime bit_mask = 0x%08x\n", bit_mask);
633 	writel(bit_mask, &dev->op_regs->endptprime);
634 out:
635 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
636 	return 0;
637 }
638 
639 
640 /* fill in the dTD structure to build a transfer descriptor */
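/*
 * A single dTD moves at most DTD_MAX_TRANSFER_LENGTH (16k bytes) and carries
 * five buffer-page pointers; filling all five with buf_ptr + i * PAGE_SIZE
 * lets the DMA engine cross up to four page boundaries within one dTD.
 * Longer requests are split into a chain of dTDs by req_to_dtd() below.
 */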
641 static struct langwell_dtd *build_dtd(struct langwell_request *req,
642 		unsigned *length, dma_addr_t *dma, int *is_last)
643 {
644 	u32			 buf_ptr;
645 	struct langwell_dtd	*dtd;
646 	struct langwell_udc	*dev;
647 	int			i;
648 
649 	dev = req->ep->dev;
650 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
651 
652 	/* the maximum transfer length, up to 16k bytes */
653 	*length = min(req->req.length - req->req.actual,
654 			(unsigned)DTD_MAX_TRANSFER_LENGTH);
655 
656 	/* create dTD dma_pool resource */
657 	dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
658 	if (dtd == NULL)
659 		return dtd;
660 	dtd->dtd_dma = *dma;
661 
662 	/* initialize buffer page pointers */
663 	buf_ptr = (u32)(req->req.dma + req->req.actual);
664 	for (i = 0; i < 5; i++)
665 		dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);
666 
667 	req->req.actual += *length;
668 
669 	/* fill in total bytes with transfer size */
670 	dtd->dtd_total = cpu_to_le16(*length);
671 	dev_vdbg(&dev->pdev->dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
672 
673 	/* decide whether this is the last dTD, honoring req->req.zero */
674 	if (req->req.zero) {
675 		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
676 			*is_last = 1;
677 		else
678 			*is_last = 0;
679 	} else if (req->req.length == req->req.actual) {
680 		*is_last = 1;
681 	} else
682 		*is_last = 0;
683 
684 	if (*is_last == 0)
685 		dev_vdbg(&dev->pdev->dev, "multi-dtd request!\n");
686 
687 	/* set interrupt on complete bit for the last dTD */
688 	if (*is_last && !req->req.no_interrupt)
689 		dtd->dtd_ioc = 1;
690 
691 	/* set multiplier override 0 for non-ISO and non-TX endpoint */
692 	dtd->dtd_multo = 0;
693 
694 	/* set the active bit of status field to 1 */
695 	dtd->dtd_status = DTD_STS_ACTIVE;
696 	dev_vdbg(&dev->pdev->dev, "dtd->dtd_status = 0x%02x\n",
697 			dtd->dtd_status);
698 
699 	dev_vdbg(&dev->pdev->dev, "length = %d, dma addr= 0x%08x\n",
700 			*length, (int)*dma);
701 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
702 	return dtd;
703 }
704 
705 
706 /* generate dTD linked list for a request */
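/*
 * req->head/req->tail bracket the chain: dtd_next links the dTDs by DMA
 * address for the controller, while next_dtd_virt mirrors the chain with
 * virtual pointers so that done() can walk it and free each dTD.  The
 * terminate bit (DTD_TERM) marks the final dTD of the request.
 */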
707 static int req_to_dtd(struct langwell_request *req)
708 {
709 	unsigned		count;
710 	int			is_last, is_first = 1;
711 	struct langwell_dtd	*dtd, *last_dtd = NULL;
712 	struct langwell_udc	*dev;
713 	dma_addr_t		dma;
714 
715 	dev = req->ep->dev;
716 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
717 	do {
718 		dtd = build_dtd(req, &count, &dma, &is_last);
719 		if (dtd == NULL)
720 			return -ENOMEM;
721 
722 		if (is_first) {
723 			is_first = 0;
724 			req->head = dtd;
725 		} else {
726 			last_dtd->dtd_next = cpu_to_le32(dma);
727 			last_dtd->next_dtd_virt = dtd;
728 		}
729 		last_dtd = dtd;
730 		req->dtd_count++;
731 	} while (!is_last);
732 
733 	/* set terminate bit to 1 for the last dTD */
734 	dtd->dtd_next = DTD_TERM;
735 
736 	req->tail = dtd;
737 
738 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
739 	return 0;
740 }
741 
742 /*-------------------------------------------------------------------------*/
743 
744 /* queue (submits) an I/O request to an endpoint */
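/*
 * Typical gadget-driver call sequence that ends up here (an illustrative
 * sketch only; "my_buf", "my_len" and "my_complete" are placeholder names,
 * not identifiers used by this driver):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf      = my_buf;
 *	req->length   = my_len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() dispatches to this function through langwell_ep_ops.queue;
 * the completion callback later runs with dev->lock released (see done()).
 */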
745 static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
746 		gfp_t gfp_flags)
747 {
748 	struct langwell_request	*req;
749 	struct langwell_ep	*ep;
750 	struct langwell_udc	*dev;
751 	unsigned long		flags;
752 	int			is_iso = 0, zlflag = 0;
753 
754 	/* always require a cpu-view buffer */
755 	req = container_of(_req, struct langwell_request, req);
756 	ep = container_of(_ep, struct langwell_ep, ep);
757 
758 	if (!_req || !_req->complete || !_req->buf
759 			|| !list_empty(&req->queue)) {
760 		return -EINVAL;
761 	}
762 
763 	if (unlikely(!_ep || !ep->desc))
764 		return -EINVAL;
765 
766 	dev = ep->dev;
767 	req->ep = ep;
768 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
769 
770 	if (usb_endpoint_xfer_isoc(ep->desc)) {
771 		if (req->req.length > ep->ep.maxpacket)
772 			return -EMSGSIZE;
773 		is_iso = 1;
774 	}
775 
776 	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
777 		return -ESHUTDOWN;
778 
779 	/* set up dma mapping in case the caller didn't */
780 	if (_req->dma == DMA_ADDR_INVALID) {
781 		/* WORKAROUND: WARN_ON(size == 0) */
782 		if (_req->length == 0) {
783 			dev_vdbg(&dev->pdev->dev, "req->length: 0->1\n");
784 			zlflag = 1;
785 			_req->length++;
786 		}
787 
788 		_req->dma = dma_map_single(&dev->pdev->dev,
789 				_req->buf, _req->length,
790 				is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
791 		if (zlflag && (_req->length == 1)) {
792 			dev_vdbg(&dev->pdev->dev, "req->length: 1->0\n");
793 			zlflag = 0;
794 			_req->length = 0;
795 		}
796 
797 		req->mapped = 1;
798 		dev_vdbg(&dev->pdev->dev, "req->mapped = 1\n");
799 	} else {
800 		dma_sync_single_for_device(&dev->pdev->dev,
801 				_req->dma, _req->length,
802 				is_in(ep) ?  DMA_TO_DEVICE : DMA_FROM_DEVICE);
803 		req->mapped = 0;
804 		dev_vdbg(&dev->pdev->dev, "req->mapped = 0\n");
805 	}
806 
807 	dev_dbg(&dev->pdev->dev,
808 			"%s queue req %p, len %u, buf %p, dma 0x%08x\n",
809 			_ep->name,
810 			_req, _req->length, _req->buf, (int)_req->dma);
811 
812 	_req->status = -EINPROGRESS;
813 	_req->actual = 0;
814 	req->dtd_count = 0;
815 
816 	spin_lock_irqsave(&dev->lock, flags);
817 
818 	/* build and put dTDs to endpoint queue */
819 	if (!req_to_dtd(req)) {
820 		queue_dtd(ep, req);
821 	} else {
822 		spin_unlock_irqrestore(&dev->lock, flags);
823 		return -ENOMEM;
824 	}
825 
826 	/* update ep0 state */
827 	if (ep->ep_num == 0)
828 		dev->ep0_state = DATA_STATE_XMIT;
829 
830 	if (likely(req != NULL)) {
831 		list_add_tail(&req->queue, &ep->queue);
832 		dev_vdbg(&dev->pdev->dev, "list_add_tail()\n");
833 	}
834 
835 	spin_unlock_irqrestore(&dev->lock, flags);
836 
837 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
838 	return 0;
839 }
840 
841 
842 /* dequeue (cancels, unlinks) an I/O request from an endpoint */
843 static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
844 {
845 	struct langwell_ep	*ep;
846 	struct langwell_udc	*dev;
847 	struct langwell_request	*req;
848 	unsigned long		flags;
849 	int			stopped, ep_num, retval = 0;
850 	u32			endptctrl;
851 
852 	ep = container_of(_ep, struct langwell_ep, ep);
853 	dev = ep->dev;
854 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
855 
856 	if (!_ep || !ep->desc || !_req)
857 		return -EINVAL;
858 
859 	if (!dev->driver)
860 		return -ESHUTDOWN;
861 
862 	spin_lock_irqsave(&dev->lock, flags);
863 	stopped = ep->stopped;
864 
865 	/* quiesce dma while we patch the queue */
866 	ep->stopped = 1;
867 	ep_num = ep->ep_num;
868 
869 	/* disable endpoint control register */
870 	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
871 	if (is_in(ep))
872 		endptctrl &= ~EPCTRL_TXE;
873 	else
874 		endptctrl &= ~EPCTRL_RXE;
875 	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
876 
877 	/* make sure it's still queued on this endpoint */
878 	list_for_each_entry(req, &ep->queue, queue) {
879 		if (&req->req == _req)
880 			break;
881 	}
882 
883 	if (&req->req != _req) {
884 		retval = -EINVAL;
885 		goto done;
886 	}
887 
888 	/* queue head may be partially complete. */
889 	if (ep->queue.next == &req->queue) {
890 		dev_dbg(&dev->pdev->dev, "unlink (%s) dma\n", _ep->name);
891 		_req->status = -ECONNRESET;
892 		langwell_ep_fifo_flush(&ep->ep);
893 
894 		/* not the last request in endpoint queue */
895 		if (likely(req->queue.next != &ep->queue)) {
896 			struct langwell_dqh	*dqh;
897 			struct langwell_request	*next_req;
898 
899 			dqh = ep->dqh;
900 			next_req = list_entry(req->queue.next,
901 					struct langwell_request, queue);
902 
903 			/* point the dQH to the first dTD of next request */
904 			writel((u32) next_req->head, &dqh->dqh_current);
905 		}
906 	} else {
907 		struct langwell_request	*prev_req;
908 
909 		prev_req = list_entry(req->queue.prev,
910 				struct langwell_request, queue);
911 		writel(readl(&req->tail->dtd_next),
912 				&prev_req->tail->dtd_next);
913 	}
914 
915 	done(ep, req, -ECONNRESET);
916 
917 done:
918 	/* enable endpoint again */
919 	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
920 	if (is_in(ep))
921 		endptctrl |= EPCTRL_TXE;
922 	else
923 		endptctrl |= EPCTRL_RXE;
924 	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
925 
926 	ep->stopped = stopped;
927 	spin_unlock_irqrestore(&dev->lock, flags);
928 
929 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
930 	return retval;
931 }
932 
933 
934 /*-------------------------------------------------------------------------*/
935 
936 /* endpoint set/clear halt */
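/*
 * Halt is controlled through the stall bits in ENDPTCTRLx (EPCTRL_TXS for
 * IN, EPCTRL_RXS for OUT).  When the stall is cleared the corresponding
 * data-toggle reset bit (EPCTRL_TXR/RXR) is set as well, so the endpoint
 * restarts at DATA0 as required after a ClearFeature(ENDPOINT_HALT).
 */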
937 static void ep_set_halt(struct langwell_ep *ep, int value)
938 {
939 	u32			endptctrl = 0;
940 	int			ep_num;
941 	struct langwell_udc	*dev = ep->dev;
942 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
943 
944 	ep_num = ep->ep_num;
945 	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
946 
947 	/* value: 1 - set halt, 0 - clear halt */
948 	if (value) {
949 		/* set the stall bit */
950 		if (is_in(ep))
951 			endptctrl |= EPCTRL_TXS;
952 		else
953 			endptctrl |= EPCTRL_RXS;
954 	} else {
955 		/* clear the stall bit and reset data toggle */
956 		if (is_in(ep)) {
957 			endptctrl &= ~EPCTRL_TXS;
958 			endptctrl |= EPCTRL_TXR;
959 		} else {
960 			endptctrl &= ~EPCTRL_RXS;
961 			endptctrl |= EPCTRL_RXR;
962 		}
963 	}
964 
965 	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
966 
967 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
968 }
969 
970 
971 /* set the endpoint halt feature */
972 static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
973 {
974 	struct langwell_ep	*ep;
975 	struct langwell_udc	*dev;
976 	unsigned long		flags;
977 	int			retval = 0;
978 
979 	ep = container_of(_ep, struct langwell_ep, ep);
980 	dev = ep->dev;
981 
982 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
983 
984 	if (!_ep || !ep->desc)
985 		return -EINVAL;
986 
987 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
988 		return -ESHUTDOWN;
989 
990 	if (usb_endpoint_xfer_isoc(ep->desc))
991 		return  -EOPNOTSUPP;
992 
993 	spin_lock_irqsave(&dev->lock, flags);
994 
995 	/*
996 	 * an attempt to halt an IN ep will fail if any transfer requests
997 	 * are still queued
998 	 */
999 	if (!list_empty(&ep->queue) && is_in(ep) && value) {
1000 		/* IN endpoint FIFO holds bytes */
1001 		dev_dbg(&dev->pdev->dev, "%s FIFO holds bytes\n", _ep->name);
1002 		retval = -EAGAIN;
1003 		goto done;
1004 	}
1005 
1006 	/* endpoint set/clear halt */
1007 	if (ep->ep_num) {
1008 		ep_set_halt(ep, value);
1009 	} else { /* endpoint 0 */
1010 		dev->ep0_state = WAIT_FOR_SETUP;
1011 		dev->ep0_dir = USB_DIR_OUT;
1012 	}
1013 done:
1014 	spin_unlock_irqrestore(&dev->lock, flags);
1015 	dev_dbg(&dev->pdev->dev, "%s %s halt\n",
1016 			_ep->name, value ? "set" : "clear");
1017 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1018 	return retval;
1019 }
1020 
1021 
1022 /* set the halt feature and ignores clear requests */
1023 static int langwell_ep_set_wedge(struct usb_ep *_ep)
1024 {
1025 	struct langwell_ep	*ep;
1026 	struct langwell_udc	*dev;
1027 
1028 	ep = container_of(_ep, struct langwell_ep, ep);
1029 	dev = ep->dev;
1030 
1031 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1032 
1033 	if (!_ep || !ep->desc)
1034 		return -EINVAL;
1035 
1036 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1037 	return usb_ep_set_halt(_ep);
1038 }
1039 
1040 
1041 /* flush contents of a fifo */
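/*
 * Flushing works by writing the endpoint's bit into ENDPTFLUSH and waiting
 * for the controller to clear it again.  Because a primed endpoint can win
 * the race and re-prime itself, the outer loop repeats the flush until
 * ENDPTSTAT shows the endpoint idle, bounded by FLUSH_TIMEOUT.
 */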
1042 static void langwell_ep_fifo_flush(struct usb_ep *_ep)
1043 {
1044 	struct langwell_ep	*ep;
1045 	struct langwell_udc	*dev;
1046 	u32			flush_bit;
1047 	unsigned long		timeout;
1048 
1049 	ep = container_of(_ep, struct langwell_ep, ep);
1050 	dev = ep->dev;
1051 
1052 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1053 
1054 	if (!_ep || !ep->desc) {
1055 		dev_vdbg(&dev->pdev->dev, "ep or ep->desc is NULL\n");
1056 		dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1057 		return;
1058 	}
1059 
1060 	dev_vdbg(&dev->pdev->dev, "%s-%s fifo flush\n",
1061 			_ep->name, DIR_STRING(ep));
1062 
1063 	/* flush endpoint buffer */
1064 	if (ep->ep_num == 0)
1065 		flush_bit = (1 << 16) | 1;
1066 	else if (is_in(ep))
1067 		flush_bit = 1 << (ep->ep_num + 16);	/* TX */
1068 	else
1069 		flush_bit = 1 << ep->ep_num;		/* RX */
1070 
1071 	/* wait until flush complete */
1072 	timeout = jiffies + FLUSH_TIMEOUT;
1073 	do {
1074 		writel(flush_bit, &dev->op_regs->endptflush);
1075 		while (readl(&dev->op_regs->endptflush)) {
1076 			if (time_after(jiffies, timeout)) {
1077 				dev_err(&dev->pdev->dev, "ep flush timeout\n");
1078 				goto done;
1079 			}
1080 			cpu_relax();
1081 		}
1082 	} while (readl(&dev->op_regs->endptstat) & flush_bit);
1083 done:
1084 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1085 }
1086 
1087 
1088 /* endpoints operations structure */
1089 static const struct usb_ep_ops langwell_ep_ops = {
1090 
1091 	/* configure endpoint, making it usable */
1092 	.enable		= langwell_ep_enable,
1093 
1094 	/* endpoint is no longer usable */
1095 	.disable	= langwell_ep_disable,
1096 
1097 	/* allocate a request object to use with this endpoint */
1098 	.alloc_request	= langwell_alloc_request,
1099 
1100 	/* free a request object */
1101 	.free_request	= langwell_free_request,
1102 
1103 	/* queue (submits) an I/O request to an endpoint */
1104 	.queue		= langwell_ep_queue,
1105 
1106 	/* dequeue (cancels, unlinks) an I/O request from an endpoint */
1107 	.dequeue	= langwell_ep_dequeue,
1108 
1109 	/* set the endpoint halt feature */
1110 	.set_halt	= langwell_ep_set_halt,
1111 
1112 	/* set the halt feature and ignores clear requests */
1113 	.set_wedge	= langwell_ep_set_wedge,
1114 
1115 	/* flush contents of a fifo */
1116 	.fifo_flush	= langwell_ep_fifo_flush,
1117 };
1118 
1119 
1120 /*-------------------------------------------------------------------------*/
1121 
1122 /* device controller usb_gadget_ops structure */
1123 
1124 /* returns the current frame number */
1125 static int langwell_get_frame(struct usb_gadget *_gadget)
1126 {
1127 	struct langwell_udc	*dev;
1128 	u16			retval;
1129 
1130 	if (!_gadget)
1131 		return -ENODEV;
1132 
1133 	dev = container_of(_gadget, struct langwell_udc, gadget);
1134 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1135 
1136 	retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
1137 
1138 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1139 	return retval;
1140 }
1141 
1142 
1143 /* enter or exit PHY low power state */
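/*
 * PHY low power suspend is controlled by the PHCD bit in DEVLC.  As part of
 * the sighting workaround noted below, only byte 2 of the register is
 * written back (writeb rather than writel), presumably so the other DEVLC
 * fields are left undisturbed.
 */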
1144 static void langwell_phy_low_power(struct langwell_udc *dev, bool flag)
1145 {
1146 	u32		devlc;
1147 	u8		devlc_byte2;
1148 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1149 
1150 	devlc = readl(&dev->op_regs->devlc);
1151 	dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
1152 
1153 	if (flag)
1154 		devlc |= LPM_PHCD;
1155 	else
1156 		devlc &= ~LPM_PHCD;
1157 
1158 	/* FIXME: workaround for Langwell A1/A2/A3 sighting */
1159 	devlc_byte2 = (devlc >> 16) & 0xff;
1160 	writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
1161 
1162 	devlc = readl(&dev->op_regs->devlc);
1163 	dev_vdbg(&dev->pdev->dev,
1164 			"%s PHY low power suspend, devlc = 0x%08x\n",
1165 			flag ? "enter" : "exit", devlc);
1166 }
1167 
1168 
1169 /* tries to wake up the host connected to this gadget */
1170 static int langwell_wakeup(struct usb_gadget *_gadget)
1171 {
1172 	struct langwell_udc	*dev;
1173 	u32			portsc1;
1174 	unsigned long		flags;
1175 
1176 	if (!_gadget)
1177 		return 0;
1178 
1179 	dev = container_of(_gadget, struct langwell_udc, gadget);
1180 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1181 
1182 	/* remote wakeup feature not enabled by host */
1183 	if (!dev->remote_wakeup) {
1184 		dev_info(&dev->pdev->dev, "remote wakeup is disabled\n");
1185 		return -ENOTSUPP;
1186 	}
1187 
1188 	spin_lock_irqsave(&dev->lock, flags);
1189 
1190 	portsc1 = readl(&dev->op_regs->portsc1);
1191 	if (!(portsc1 & PORTS_SUSP)) {
1192 		spin_unlock_irqrestore(&dev->lock, flags);
1193 		return 0;
1194 	}
1195 
1196 	/* LPM L1 to L0 or legacy remote wakeup */
1197 	if (dev->lpm && dev->lpm_state == LPM_L1)
1198 		dev_info(&dev->pdev->dev, "LPM L1 to L0 remote wakeup\n");
1199 	else
1200 		dev_info(&dev->pdev->dev, "device remote wakeup\n");
1201 
1202 	/* exit PHY low power suspend */
1203 	if (dev->pdev->device != 0x0829)
1204 		langwell_phy_low_power(dev, 0);
1205 
1206 	/* force port resume */
1207 	portsc1 |= PORTS_FPR;
1208 	writel(portsc1, &dev->op_regs->portsc1);
1209 
1210 	spin_unlock_irqrestore(&dev->lock, flags);
1211 
1212 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1213 	return 0;
1214 }
1215 
1216 
1217 /* notify controller that VBUS is powered or not */
1218 static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
1219 {
1220 	struct langwell_udc	*dev;
1221 	unsigned long		flags;
1222 	u32			usbcmd;
1223 
1224 	if (!_gadget)
1225 		return -ENODEV;
1226 
1227 	dev = container_of(_gadget, struct langwell_udc, gadget);
1228 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1229 
1230 	spin_lock_irqsave(&dev->lock, flags);
1231 	dev_vdbg(&dev->pdev->dev, "VBUS status: %s\n",
1232 			is_active ? "on" : "off");
1233 
1234 	dev->vbus_active = (is_active != 0);
1235 	if (dev->driver && dev->softconnected && dev->vbus_active) {
1236 		usbcmd = readl(&dev->op_regs->usbcmd);
1237 		usbcmd |= CMD_RUNSTOP;
1238 		writel(usbcmd, &dev->op_regs->usbcmd);
1239 	} else {
1240 		usbcmd = readl(&dev->op_regs->usbcmd);
1241 		usbcmd &= ~CMD_RUNSTOP;
1242 		writel(usbcmd, &dev->op_regs->usbcmd);
1243 	}
1244 
1245 	spin_unlock_irqrestore(&dev->lock, flags);
1246 
1247 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1248 	return 0;
1249 }
1250 
1251 
1252 /* constrain controller's VBUS power usage */
1253 static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1254 {
1255 	struct langwell_udc	*dev;
1256 
1257 	if (!_gadget)
1258 		return -ENODEV;
1259 
1260 	dev = container_of(_gadget, struct langwell_udc, gadget);
1261 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1262 
1263 	if (dev->transceiver) {
1264 		dev_vdbg(&dev->pdev->dev, "otg_set_power\n");
1265 		dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1266 		return otg_set_power(dev->transceiver, mA);
1267 	}
1268 
1269 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1270 	return -ENOTSUPP;
1271 }
1272 
1273 
1274 /* D+ pullup, software-controlled connect/disconnect to USB host */
1275 static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
1276 {
1277 	struct langwell_udc	*dev;
1278 	u32			usbcmd;
1279 	unsigned long		flags;
1280 
1281 	if (!_gadget)
1282 		return -ENODEV;
1283 
1284 	dev = container_of(_gadget, struct langwell_udc, gadget);
1285 
1286 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1287 
1288 	spin_lock_irqsave(&dev->lock, flags);
1289 	dev->softconnected = (is_on != 0);
1290 
1291 	if (dev->driver && dev->softconnected && dev->vbus_active) {
1292 		usbcmd = readl(&dev->op_regs->usbcmd);
1293 		usbcmd |= CMD_RUNSTOP;
1294 		writel(usbcmd, &dev->op_regs->usbcmd);
1295 	} else {
1296 		usbcmd = readl(&dev->op_regs->usbcmd);
1297 		usbcmd &= ~CMD_RUNSTOP;
1298 		writel(usbcmd, &dev->op_regs->usbcmd);
1299 	}
1300 	spin_unlock_irqrestore(&dev->lock, flags);
1301 
1302 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1303 	return 0;
1304 }
1305 
1306 static int langwell_start(struct usb_gadget *g,
1307 		struct usb_gadget_driver *driver);
1308 
1309 static int langwell_stop(struct usb_gadget *g,
1310 		struct usb_gadget_driver *driver);
1311 
1312 /* device controller usb_gadget_ops structure */
1313 static const struct usb_gadget_ops langwell_ops = {
1314 
1315 	/* returns the current frame number */
1316 	.get_frame	= langwell_get_frame,
1317 
1318 	/* tries to wake up the host connected to this gadget */
1319 	.wakeup		= langwell_wakeup,
1320 
1321 	/* set the device selfpowered feature, always selfpowered */
1322 	/* .set_selfpowered = langwell_set_selfpowered, */
1323 
1324 	/* notify controller that VBUS is powered or not */
1325 	.vbus_session	= langwell_vbus_session,
1326 
1327 	/* constrain controller's VBUS power usage */
1328 	.vbus_draw	= langwell_vbus_draw,
1329 
1330 	/* D+ pullup, software-controlled connect/disconnect to USB host */
1331 	.pullup		= langwell_pullup,
1332 
1333 	.udc_start	= langwell_start,
1334 	.udc_stop	= langwell_stop,
1335 };
1336 
1337 
1338 /*-------------------------------------------------------------------------*/
1339 
1340 /* device controller operations */
1341 
1342 /* reset device controller */
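/*
 * Bring-up sequence: stop the controller, assert CMD_RST and poll until it
 * self-clears, switch USBMODE to device mode with setup lockout disabled
 * (MODE_SLOM; the SUTW tripwire in USBCMD is used instead), ACK LPM tokens
 * where supported, and finally program ENDPOINTLISTADDR with the dQH array
 * base (ep_dqh_dma, masked to the register's alignment requirement).
 */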
1343 static int langwell_udc_reset(struct langwell_udc *dev)
1344 {
1345 	u32		usbcmd, usbmode, devlc, endpointlistaddr;
1346 	u8		devlc_byte0, devlc_byte2;
1347 	unsigned long	timeout;
1348 
1349 	if (!dev)
1350 		return -EINVAL;
1351 
1352 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1353 
1354 	/* set controller to stop state */
1355 	usbcmd = readl(&dev->op_regs->usbcmd);
1356 	usbcmd &= ~CMD_RUNSTOP;
1357 	writel(usbcmd, &dev->op_regs->usbcmd);
1358 
1359 	/* reset device controller */
1360 	usbcmd = readl(&dev->op_regs->usbcmd);
1361 	usbcmd |= CMD_RST;
1362 	writel(usbcmd, &dev->op_regs->usbcmd);
1363 
1364 	/* wait for reset to complete */
1365 	timeout = jiffies + RESET_TIMEOUT;
1366 	while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
1367 		if (time_after(jiffies, timeout)) {
1368 			dev_err(&dev->pdev->dev, "device reset timeout\n");
1369 			return -ETIMEDOUT;
1370 		}
1371 		cpu_relax();
1372 	}
1373 
1374 	/* set controller to device mode */
1375 	usbmode = readl(&dev->op_regs->usbmode);
1376 	usbmode |= MODE_DEVICE;
1377 
1378 	/* turn setup lockout off, require setup tripwire in usbcmd */
1379 	usbmode |= MODE_SLOM;
1380 
1381 	writel(usbmode, &dev->op_regs->usbmode);
1382 	usbmode = readl(&dev->op_regs->usbmode);
1383 	dev_vdbg(&dev->pdev->dev, "usbmode=0x%08x\n", usbmode);
1384 
1385 	/* Write-Clear setup status */
1386 	writel(0, &dev->op_regs->usbsts);
1387 
1388 	/* if the device supports USB LPM, ACK all LPM tokens */
1389 	if (dev->lpm) {
1390 		devlc = readl(&dev->op_regs->devlc);
1391 		dev_vdbg(&dev->pdev->dev, "devlc = 0x%08x\n", devlc);
1392 		/* FIXME: workaround for Langwell A1/A2/A3 sighting */
1393 		devlc &= ~LPM_STL;	/* don't STALL LPM token */
1394 		devlc &= ~LPM_NYT_ACK;	/* ACK LPM token */
1395 		devlc_byte0 = devlc & 0xff;
1396 		devlc_byte2 = (devlc >> 16) & 0xff;
1397 		writeb(devlc_byte0, (u8 *)&dev->op_regs->devlc);
1398 		writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
1399 		devlc = readl(&dev->op_regs->devlc);
1400 		dev_vdbg(&dev->pdev->dev,
1401 				"ACK LPM token, devlc = 0x%08x\n", devlc);
1402 	}
1403 
1404 	/* fill endpointlistaddr register */
1405 	endpointlistaddr = dev->ep_dqh_dma;
1406 	endpointlistaddr &= ENDPOINTLISTADDR_MASK;
1407 	writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
1408 
1409 	dev_vdbg(&dev->pdev->dev,
1410 		"dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
1411 		dev->ep_dqh, endpointlistaddr,
1412 		readl(&dev->op_regs->endpointlistaddr));
1413 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1414 	return 0;
1415 }
1416 
1417 
1418 /* reinitialize device controller endpoints */
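/*
 * dev->ep[] keeps two entries per hardware endpoint: even indexes are the
 * OUT (RX) side, odd indexes the IN (TX) side, so ep_num is simply i / 2.
 * Indexes 0/1 belong to ep0, which is set up separately (here and in
 * ep0_reset()); the loop below therefore starts at 2 and adds the remaining
 * endpoints to gadget.ep_list.
 */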
1419 static int eps_reinit(struct langwell_udc *dev)
1420 {
1421 	struct langwell_ep	*ep;
1422 	char			name[14];
1423 	int			i;
1424 
1425 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1426 
1427 	/* initialize ep0 */
1428 	ep = &dev->ep[0];
1429 	ep->dev = dev;
1430 	strncpy(ep->name, "ep0", sizeof(ep->name));
1431 	ep->ep.name = ep->name;
1432 	ep->ep.ops = &langwell_ep_ops;
1433 	ep->stopped = 0;
1434 	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1435 	ep->ep_num = 0;
1436 	ep->desc = &langwell_ep0_desc;
1437 	INIT_LIST_HEAD(&ep->queue);
1438 
1439 	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1440 
1441 	/* initialize other endpoints */
1442 	for (i = 2; i < dev->ep_max; i++) {
1443 		ep = &dev->ep[i];
1444 		if (i % 2)
1445 			snprintf(name, sizeof(name), "ep%din", i / 2);
1446 		else
1447 			snprintf(name, sizeof(name), "ep%dout", i / 2);
1448 		ep->dev = dev;
1449 		strncpy(ep->name, name, sizeof(ep->name));
1450 		ep->ep.name = ep->name;
1451 
1452 		ep->ep.ops = &langwell_ep_ops;
1453 		ep->stopped = 0;
1454 		ep->ep.maxpacket = (unsigned short) ~0;
1455 		ep->ep_num = i / 2;
1456 
1457 		INIT_LIST_HEAD(&ep->queue);
1458 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
1459 	}
1460 
1461 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1462 	return 0;
1463 }
1464 
1465 
1466 /* enable interrupt and set controller to run state */
1467 static void langwell_udc_start(struct langwell_udc *dev)
1468 {
1469 	u32	usbintr, usbcmd;
1470 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1471 
1472 	/* enable interrupts */
1473 	usbintr = INTR_ULPIE	/* ULPI */
1474 		| INTR_SLE	/* suspend */
1475 		/* | INTR_SRE	SOF received */
1476 		| INTR_URE	/* USB reset */
1477 		| INTR_AAE	/* async advance */
1478 		| INTR_SEE	/* system error */
1479 		| INTR_FRE	/* frame list rollover */
1480 		| INTR_PCE	/* port change detect */
1481 		| INTR_UEE	/* USB error interrupt */
1482 		| INTR_UE;	/* USB interrupt */
1483 	writel(usbintr, &dev->op_regs->usbintr);
1484 
1485 	/* clear stopped bit */
1486 	dev->stopped = 0;
1487 
1488 	/* set controller to run */
1489 	usbcmd = readl(&dev->op_regs->usbcmd);
1490 	usbcmd |= CMD_RUNSTOP;
1491 	writel(usbcmd, &dev->op_regs->usbcmd);
1492 
1493 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1494 }
1495 
1496 
1497 /* disable interrupt and set controller to stop state */
1498 static void langwell_udc_stop(struct langwell_udc *dev)
1499 {
1500 	u32	usbcmd;
1501 
1502 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1503 
1504 	/* disable all interrupts */
1505 	writel(0, &dev->op_regs->usbintr);
1506 
1507 	/* set stopped bit */
1508 	dev->stopped = 1;
1509 
1510 	/* set controller to stop state */
1511 	usbcmd = readl(&dev->op_regs->usbcmd);
1512 	usbcmd &= ~CMD_RUNSTOP;
1513 	writel(usbcmd, &dev->op_regs->usbcmd);
1514 
1515 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1516 }
1517 
1518 
1519 /* stop all USB activities */
1520 static void stop_activity(struct langwell_udc *dev)
1521 {
1522 	struct langwell_ep	*ep;
1523 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1524 
1525 	nuke(&dev->ep[0], -ESHUTDOWN);
1526 
1527 	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1528 		nuke(ep, -ESHUTDOWN);
1529 	}
1530 
1531 	/* report disconnect; the driver is already quiesced */
1532 	if (dev->driver) {
1533 		spin_unlock(&dev->lock);
1534 		dev->driver->disconnect(&dev->gadget);
1535 		spin_lock(&dev->lock);
1536 	}
1537 
1538 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1539 }
1540 
1541 
1542 /*-------------------------------------------------------------------------*/
1543 
1544 /* device "function" sysfs attribute file */
1545 static ssize_t show_function(struct device *_dev,
1546 		struct device_attribute *attr, char *buf)
1547 {
1548 	struct langwell_udc	*dev = dev_get_drvdata(_dev);
1549 
1550 	if (!dev->driver || !dev->driver->function
1551 			|| strlen(dev->driver->function) > PAGE_SIZE)
1552 		return 0;
1553 
1554 	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1555 }
1556 static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
1557 
1558 
1559 static inline enum usb_device_speed lpm_device_speed(u32 reg)
1560 {
1561 	switch (LPM_PSPD(reg)) {
1562 	case LPM_SPEED_HIGH:
1563 		return USB_SPEED_HIGH;
1564 	case LPM_SPEED_FULL:
1565 		return USB_SPEED_FULL;
1566 	case LPM_SPEED_LOW:
1567 		return USB_SPEED_LOW;
1568 	default:
1569 		return USB_SPEED_UNKNOWN;
1570 	}
1571 }
1572 
1573 /* device "langwell_udc" sysfs attribute file */
1574 static ssize_t show_langwell_udc(struct device *_dev,
1575 		struct device_attribute *attr, char *buf)
1576 {
1577 	struct langwell_udc	*dev = dev_get_drvdata(_dev);
1578 	struct langwell_request *req;
1579 	struct langwell_ep	*ep = NULL;
1580 	char			*next;
1581 	unsigned		size;
1582 	unsigned		t;
1583 	unsigned		i;
1584 	unsigned long		flags;
1585 	u32			tmp_reg;
1586 
1587 	next = buf;
1588 	size = PAGE_SIZE;
1589 	spin_lock_irqsave(&dev->lock, flags);
1590 
1591 	/* driver basic information */
1592 	t = scnprintf(next, size,
1593 			DRIVER_DESC "\n"
1594 			"%s version: %s\n"
1595 			"Gadget driver: %s\n\n",
1596 			driver_name, DRIVER_VERSION,
1597 			dev->driver ? dev->driver->driver.name : "(none)");
1598 	size -= t;
1599 	next += t;
1600 
1601 	/* device registers */
1602 	tmp_reg = readl(&dev->op_regs->usbcmd);
1603 	t = scnprintf(next, size,
1604 			"USBCMD reg:\n"
1605 			"SetupTW: %d\n"
1606 			"Run/Stop: %s\n\n",
1607 			(tmp_reg & CMD_SUTW) ? 1 : 0,
1608 			(tmp_reg & CMD_RUNSTOP) ? "Run" : "Stop");
1609 	size -= t;
1610 	next += t;
1611 
1612 	tmp_reg = readl(&dev->op_regs->usbsts);
1613 	t = scnprintf(next, size,
1614 			"USB Status Reg:\n"
1615 			"Device Suspend: %d\n"
1616 			"Reset Received: %d\n"
1617 			"System Error: %s\n"
1618 			"USB Error Interrupt: %s\n\n",
1619 			(tmp_reg & STS_SLI) ? 1 : 0,
1620 			(tmp_reg & STS_URI) ? 1 : 0,
1621 			(tmp_reg & STS_SEI) ? "Error" : "No error",
1622 			(tmp_reg & STS_UEI) ? "Error detected" : "No error");
1623 	size -= t;
1624 	next += t;
1625 
1626 	tmp_reg = readl(&dev->op_regs->usbintr);
1627 	t = scnprintf(next, size,
1628 			"USB Interrupt Enable Reg:\n"
1629 			"Sleep Enable: %d\n"
1630 			"SOF Received Enable: %d\n"
1631 			"Reset Enable: %d\n"
1632 			"System Error Enable: %d\n"
1633 			"Port Change Detected Enable: %d\n"
1634 			"USB Error Intr Enable: %d\n"
1635 			"USB Intr Enable: %d\n\n",
1636 			(tmp_reg & INTR_SLE) ? 1 : 0,
1637 			(tmp_reg & INTR_SRE) ? 1 : 0,
1638 			(tmp_reg & INTR_URE) ? 1 : 0,
1639 			(tmp_reg & INTR_SEE) ? 1 : 0,
1640 			(tmp_reg & INTR_PCE) ? 1 : 0,
1641 			(tmp_reg & INTR_UEE) ? 1 : 0,
1642 			(tmp_reg & INTR_UE) ? 1 : 0);
1643 	size -= t;
1644 	next += t;
1645 
1646 	tmp_reg = readl(&dev->op_regs->frindex);
1647 	t = scnprintf(next, size,
1648 			"USB Frame Index Reg:\n"
1649 			"Frame Number is 0x%08x\n\n",
1650 			(tmp_reg & FRINDEX_MASK));
1651 	size -= t;
1652 	next += t;
1653 
1654 	tmp_reg = readl(&dev->op_regs->deviceaddr);
1655 	t = scnprintf(next, size,
1656 			"USB Device Address Reg:\n"
1657 			"Device Addr is 0x%x\n\n",
1658 			USBADR(tmp_reg));
1659 	size -= t;
1660 	next += t;
1661 
1662 	tmp_reg = readl(&dev->op_regs->endpointlistaddr);
1663 	t = scnprintf(next, size,
1664 			"USB Endpoint List Address Reg:\n"
1665 			"Endpoint List Pointer is 0x%x\n\n",
1666 			EPBASE(tmp_reg));
1667 	size -= t;
1668 	next += t;
1669 
1670 	tmp_reg = readl(&dev->op_regs->portsc1);
1671 	t = scnprintf(next, size,
1672 		"USB Port Status & Control Reg:\n"
1673 		"Port Reset: %s\n"
1674 		"Port Suspend Mode: %s\n"
1675 		"Over-current Change: %s\n"
1676 		"Port Enable/Disable Change: %s\n"
1677 		"Port Enabled/Disabled: %s\n"
1678 		"Current Connect Status: %s\n"
1679 		"LPM Suspend Status: %s\n\n",
1680 		(tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
1681 		(tmp_reg & PORTS_SUSP) ? "Suspended" : "Not Suspended",
1682 		(tmp_reg & PORTS_OCC) ? "Detected" : "No",
1683 		(tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
1684 		(tmp_reg & PORTS_PE) ? "Enabled" : "Disabled",
1685 		(tmp_reg & PORTS_CCS) ?  "Attached" : "Not Attached",
1686 		(tmp_reg & PORTS_SLP) ? "LPM L1" : "LPM L0");
1687 	size -= t;
1688 	next += t;
1689 
1690 	tmp_reg = readl(&dev->op_regs->devlc);
1691 	t = scnprintf(next, size,
1692 		"Device LPM Control Reg:\n"
1693 		"Parallel Transceiver : %d\n"
1694 		"Serial Transceiver : %d\n"
1695 		"Port Speed: %s\n"
1696 		"Port Force Full Speed Connect: %s\n"
1697 		"PHY Low Power Suspend Clock: %s\n"
1698 		"BmAttributes: %d\n\n",
1699 		LPM_PTS(tmp_reg),
1700 		(tmp_reg & LPM_STS) ? 1 : 0,
1701 		usb_speed_string(lpm_device_speed(tmp_reg)),
1702 		(tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
1703 		(tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
1704 		LPM_BA(tmp_reg));
1705 	size -= t;
1706 	next += t;
1707 
1708 	tmp_reg = readl(&dev->op_regs->usbmode);
1709 	t = scnprintf(next, size,
1710 			"USB Mode Reg:\n"
1711 			"Controller Mode is : %s\n\n", ({
1712 				char *s;
1713 				switch (MODE_CM(tmp_reg)) {
1714 				case MODE_IDLE:
1715 					s = "Idle"; break;
1716 				case MODE_DEVICE:
1717 					s = "Device Controller"; break;
1718 				case MODE_HOST:
1719 					s = "Host Controller"; break;
1720 				default:
1721 					s = "None"; break;
1722 				}
1723 				s;
1724 			}));
1725 	size -= t;
1726 	next += t;
1727 
1728 	tmp_reg = readl(&dev->op_regs->endptsetupstat);
1729 	t = scnprintf(next, size,
1730 			"Endpoint Setup Status Reg:\n"
1731 			"SETUP on ep 0x%04x\n\n",
1732 			tmp_reg & SETUPSTAT_MASK);
1733 	size -= t;
1734 	next += t;
1735 
1736 	for (i = 0; i < dev->ep_max / 2; i++) {
1737 		tmp_reg = readl(&dev->op_regs->endptctrl[i]);
1738 		t = scnprintf(next, size, "EP Ctrl Reg [%d]: 0x%08x\n",
1739 				i, tmp_reg);
1740 		size -= t;
1741 		next += t;
1742 	}
1743 	tmp_reg = readl(&dev->op_regs->endptprime);
1744 	t = scnprintf(next, size, "EP Prime Reg: 0x%08x\n\n", tmp_reg);
1745 	size -= t;
1746 	next += t;
1747 
1748 	/* langwell_udc, langwell_ep, langwell_request structure information */
1749 	ep = &dev->ep[0];
1750 	t = scnprintf(next, size, "%s MaxPacketSize: 0x%x, ep_num: %d\n",
1751 			ep->ep.name, ep->ep.maxpacket, ep->ep_num);
1752 	size -= t;
1753 	next += t;
1754 
1755 	if (list_empty(&ep->queue)) {
1756 		t = scnprintf(next, size, "its req queue is empty\n\n");
1757 		size -= t;
1758 		next += t;
1759 	} else {
1760 		list_for_each_entry(req, &ep->queue, queue) {
1761 			t = scnprintf(next, size,
1762 				"req %p actual 0x%x length 0x%x  buf %p\n",
1763 				&req->req, req->req.actual,
1764 				req->req.length, req->req.buf);
1765 			size -= t;
1766 			next += t;
1767 		}
1768 	}
1769 	/* other endpoints on gadget->ep_list */
1770 	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1771 		if (ep->desc) {
1772 			t = scnprintf(next, size,
1773 					"\n%s MaxPacketSize: 0x%x, "
1774 					"ep_num: %d\n",
1775 					ep->ep.name, ep->ep.maxpacket,
1776 					ep->ep_num);
1777 			size -= t;
1778 			next += t;
1779 
1780 			if (list_empty(&ep->queue)) {
1781 				t = scnprintf(next, size,
1782 						"its req queue is empty\n\n");
1783 				size -= t;
1784 				next += t;
1785 			} else {
1786 				list_for_each_entry(req, &ep->queue, queue) {
1787 					t = scnprintf(next, size,
1788 						"req %p actual 0x%x length "
1789 						"0x%x  buf %p\n",
1790 						&req->req, req->req.actual,
1791 						req->req.length, req->req.buf);
1792 					size -= t;
1793 					next += t;
1794 				}
1795 			}
1796 		}
1797 	}
1798 
1799 	spin_unlock_irqrestore(&dev->lock, flags);
1800 	return PAGE_SIZE - size;
1801 }
1802 static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
1803 
1804 
1805 /* device "remote_wakeup" sysfs attribute file */
1806 static ssize_t store_remote_wakeup(struct device *_dev,
1807 		struct device_attribute *attr, const char *buf, size_t count)
1808 {
1809 	struct langwell_udc	*dev = dev_get_drvdata(_dev);
1810 	unsigned long		flags;
1811 	ssize_t			rc = count;
1812 
1813 	if (count > 2)
1814 		return -EINVAL;
1815 
1816 	if (count > 0 && buf[count-1] == '\n')
1817 		((char *) buf)[count-1] = 0;
1818 
1819 	if (buf[0] != '1')
1820 		return -EINVAL;
1821 
1822 	/* force remote wakeup enabled in case the gadget driver doesn't support it */
1823 	spin_lock_irqsave(&dev->lock, flags);
1824 	dev->remote_wakeup = 1;
1825 	dev->dev_status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
1826 	spin_unlock_irqrestore(&dev->lock, flags);
1827 
1828 	langwell_wakeup(&dev->gadget);
1829 
1830 	return rc;
1831 }
1832 static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
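/*
 * Usage sketch (path illustrative): "echo 1 > remote_wakeup" in the same
 * sysfs directory forces remote wakeup on and signals resume to the host
 * via langwell_wakeup(); any other value is rejected with -EINVAL.
 */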
1833 
1834 
1835 /*-------------------------------------------------------------------------*/
1836 
1837 /*
1838  * when a driver is successfully registered, it will receive
1839  * control requests including set_configuration(), which enables
1840  * non-control requests.  then usb traffic follows until a
1841  * disconnect is reported.  then a host may connect again, or
1842  * the driver might get unbound.
1843  */
1844 
1845 static int langwell_start(struct usb_gadget *g,
1846 		struct usb_gadget_driver *driver)
1847 {
1848 	struct langwell_udc	*dev = gadget_to_langwell(g);
1849 	unsigned long		flags;
1850 	int			retval;
1851 
1852 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1853 
1854 	spin_lock_irqsave(&dev->lock, flags);
1855 
1856 	/* hook up the driver ... */
1857 	driver->driver.bus = NULL;
1858 	dev->driver = driver;
1859 	dev->gadget.dev.driver = &driver->driver;
1860 
1861 	spin_unlock_irqrestore(&dev->lock, flags);
1862 
1863 	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
1864 	if (retval)
1865 		goto err;
1866 
1867 	dev->usb_state = USB_STATE_ATTACHED;
1868 	dev->ep0_state = WAIT_FOR_SETUP;
1869 	dev->ep0_dir = USB_DIR_OUT;
1870 
1871 	/* enable interrupt and set controller to run state */
1872 	if (dev->got_irq)
1873 		langwell_udc_start(dev);
1874 
1875 	dev_vdbg(&dev->pdev->dev,
1876 			"After langwell_udc_start(), print all registers:\n");
1877 	print_all_registers(dev);
1878 
1879 	dev_info(&dev->pdev->dev, "register driver: %s\n",
1880 			driver->driver.name);
1881 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1882 
1883 	return 0;
1884 
1885 err:
1886 	dev->gadget.dev.driver = NULL;
1887 	dev->driver = NULL;
1888 
1889 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1890 
1891 	return retval;
1892 }
1893 
1894 /* unregister gadget driver */
1895 static int langwell_stop(struct usb_gadget *g,
1896 		struct usb_gadget_driver *driver)
1897 {
1898 	struct langwell_udc	*dev = gadget_to_langwell(g);
1899 	unsigned long		flags;
1900 
1901 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
1902 
1903 	/* exit PHY low power suspend */
1904 	if (dev->pdev->device != 0x0829)
1905 		langwell_phy_low_power(dev, 0);
1906 
1907 	/* unbind OTG transceiver */
1908 	if (dev->transceiver)
1909 		(void)otg_set_peripheral(dev->transceiver, 0);
1910 
1911 	/* disable interrupt and set controller to stop state */
1912 	langwell_udc_stop(dev);
1913 
1914 	dev->usb_state = USB_STATE_ATTACHED;
1915 	dev->ep0_state = WAIT_FOR_SETUP;
1916 	dev->ep0_dir = USB_DIR_OUT;
1917 
1918 	spin_lock_irqsave(&dev->lock, flags);
1919 
1920 	/* stop all usb activities */
1921 	dev->gadget.speed = USB_SPEED_UNKNOWN;
1922 	dev->gadget.dev.driver = NULL;
1923 	dev->driver = NULL;
1924 	stop_activity(dev);
1925 	spin_unlock_irqrestore(&dev->lock, flags);
1926 
1927 	device_remove_file(&dev->pdev->dev, &dev_attr_function);
1928 
1929 	dev_info(&dev->pdev->dev, "unregistered driver '%s'\n",
1930 			driver->driver.name);
1931 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1932 
1933 	return 0;
1934 }
1935 
1936 /*-------------------------------------------------------------------------*/
1937 
1938 /*
1939  * setup tripwire is used as a semaphore to ensure that the setup data
1940  * payload is extracted from a dQH without being corrupted
1941  */
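/*
 * The controller may overwrite the dQH setup bytes at any moment if another
 * SETUP packet arrives.  The tripwire protocol below guards against a torn
 * copy: set CMD_SUTW, copy the 8 setup bytes, then verify CMD_SUTW is still
 * set; if the hardware cleared it, a new SETUP arrived mid-copy and the
 * copy is retried.
 */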
1942 static void setup_tripwire(struct langwell_udc *dev)
1943 {
1944 	u32			usbcmd,
1945 				endptsetupstat;
1946 	unsigned long		timeout;
1947 	struct langwell_dqh	*dqh;
1948 
1949 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1950 
1951 	/* ep0 OUT dQH */
1952 	dqh = &dev->ep_dqh[EP_DIR_OUT];
1953 
1954 	/* Write-Clear endptsetupstat */
1955 	endptsetupstat = readl(&dev->op_regs->endptsetupstat);
1956 	writel(endptsetupstat, &dev->op_regs->endptsetupstat);
1957 
1958 	/* wait until endptsetupstat is cleared */
1959 	timeout = jiffies + SETUPSTAT_TIMEOUT;
1960 	while (readl(&dev->op_regs->endptsetupstat)) {
1961 		if (time_after(jiffies, timeout)) {
1962 			dev_err(&dev->pdev->dev, "setup_tripwire timeout\n");
1963 			break;
1964 		}
1965 		cpu_relax();
1966 	}
1967 
1968 	/* repeat while a hazard exists, i.e. a new SETUP packet may arrive */
1969 	do {
1970 		/* set setup tripwire bit */
1971 		usbcmd = readl(&dev->op_regs->usbcmd);
1972 		writel(usbcmd | CMD_SUTW, &dev->op_regs->usbcmd);
1973 
1974 		/* copy the setup packet to local buffer */
1975 		memcpy(&dev->local_setup_buff, &dqh->dqh_setup, 8);
1976 	} while (!(readl(&dev->op_regs->usbcmd) & CMD_SUTW));
1977 
1978 	/* Write-Clear setup tripwire bit */
1979 	usbcmd = readl(&dev->op_regs->usbcmd);
1980 	writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
1981 
1982 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
1983 }
1984 
1985 
1986 /* protocol ep0 stall, will automatically be cleared on new transaction */
1987 static void ep0_stall(struct langwell_udc *dev)
1988 {
1989 	u32	endptctrl;
1990 
1991 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
1992 
1993 	/* set TX and RX to stall */
1994 	endptctrl = readl(&dev->op_regs->endptctrl[0]);
1995 	endptctrl |= EPCTRL_TXS | EPCTRL_RXS;
1996 	writel(endptctrl, &dev->op_regs->endptctrl[0]);
1997 
1998 	/* update ep0 state */
1999 	dev->ep0_state = WAIT_FOR_SETUP;
2000 	dev->ep0_dir = USB_DIR_OUT;
2001 
2002 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2003 }
2004 
2005 
2006 /* PRIME a status phase for ep0 */
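/*
 * The status phase is a zero-length transfer in the requested direction;
 * the per-device status_req is reused for it and queued on ep0 as a
 * single dTD.
 */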
2007 static int prime_status_phase(struct langwell_udc *dev, int dir)
2008 {
2009 	struct langwell_request	*req;
2010 	struct langwell_ep	*ep;
2011 	int			status = 0;
2012 
2013 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2014 
2015 	if (dir == EP_DIR_IN)
2016 		dev->ep0_dir = USB_DIR_IN;
2017 	else
2018 		dev->ep0_dir = USB_DIR_OUT;
2019 
2020 	ep = &dev->ep[0];
2021 	dev->ep0_state = WAIT_FOR_OUT_STATUS;
2022 
2023 	req = dev->status_req;
2024 
2025 	req->ep = ep;
2026 	req->req.length = 0;
2027 	req->req.status = -EINPROGRESS;
2028 	req->req.actual = 0;
2029 	req->req.complete = NULL;
2030 	req->dtd_count = 0;
2031 
2032 	if (!req_to_dtd(req))
2033 		status = queue_dtd(ep, req);
2034 	else
2035 		return -ENOMEM;
2036 
2037 	if (status)
2038 		dev_err(&dev->pdev->dev, "can't queue ep0 status request\n");
2039 
2040 	list_add_tail(&req->queue, &ep->queue);
2041 
2042 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2043 	return status;
2044 }
2045 
2046 
2047 /* SET_ADDRESS request routine */
2048 static void set_address(struct langwell_udc *dev, u16 value,
2049 		u16 index, u16 length)
2050 {
2051 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2052 
2053 	/* save the new address to device struct */
2054 	dev->dev_addr = (u8) value;
2055 	dev_vdbg(&dev->pdev->dev, "dev->dev_addr = %d\n", dev->dev_addr);
2056 
2057 	/* update usb state */
2058 	dev->usb_state = USB_STATE_ADDRESS;
2059 
2060 	/* STATUS phase */
2061 	if (prime_status_phase(dev, EP_DIR_IN))
2062 		ep0_stall(dev);
2063 
2064 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2065 }
2066 
2067 
2068 /* return endpoint by windex */
2069 static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
2070 		u16 wIndex)
2071 {
2072 	struct langwell_ep		*ep;
2073 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2074 
2075 	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2076 		return &dev->ep[0];
2077 
2078 	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2079 		u8	bEndpointAddress;
2080 		if (!ep->desc)
2081 			continue;
2082 
2083 		bEndpointAddress = ep->desc->bEndpointAddress;
2084 		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2085 			continue;
2086 
2087 		if ((wIndex & USB_ENDPOINT_NUMBER_MASK)
2088 			== (bEndpointAddress & USB_ENDPOINT_NUMBER_MASK))
2089 			return ep;
2090 	}
2091 
2092 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2093 	return NULL;
2094 }
2095 
2096 
2097 /* return whether endpoint is stalled, 0: not stalled; 1: stalled */
2098 static int ep_is_stall(struct langwell_ep *ep)
2099 {
2100 	struct langwell_udc	*dev = ep->dev;
2101 	u32			endptctrl;
2102 	int			retval;
2103 
2104 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2105 
2106 	endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
2107 	if (is_in(ep))
2108 		retval = endptctrl & EPCTRL_TXS ? 1 : 0;
2109 	else
2110 		retval = endptctrl & EPCTRL_RXS ? 1 : 0;
2111 
2112 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2113 	return retval;
2114 }
2115 
2116 
2117 /* GET_STATUS request routine */
2118 static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
2119 		u16 index, u16 length)
2120 {
2121 	struct langwell_request	*req;
2122 	struct langwell_ep	*ep;
2123 	u16	status_data = 0;	/* 16 bits cpu view status data */
2124 	int	status = 0;
2125 
2126 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2127 
2128 	ep = &dev->ep[0];
2129 
2130 	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
2131 		/* get device status */
2132 		status_data = dev->dev_status;
2133 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
2134 		/* get interface status */
2135 		status_data = 0;
2136 	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
2137 		/* get endpoint status */
2138 		struct langwell_ep	*epn;
2139 		epn = get_ep_by_windex(dev, index);
2140 		/* stall if endpoint doesn't exist */
2141 		if (!epn)
2142 			goto stall;
2143 
2144 		status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
2145 	}
2146 
2147 	dev_dbg(&dev->pdev->dev, "get status data: 0x%04x\n", status_data);
2148 
2149 	dev->ep0_dir = USB_DIR_IN;
2150 
2151 	/* borrow the per device status_req */
2152 	req = dev->status_req;
2153 
2154 	/* fill in the request structure */
2155 	*((u16 *) req->req.buf) = cpu_to_le16(status_data);
2156 	req->ep = ep;
2157 	req->req.length = 2;
2158 	req->req.status = -EINPROGRESS;
2159 	req->req.actual = 0;
2160 	req->req.complete = NULL;
2161 	req->dtd_count = 0;
2162 
2163 	/* prime the data phase */
2164 	if (!req_to_dtd(req))
2165 		status = queue_dtd(ep, req);
2166 	else			/* no mem */
2167 		goto stall;
2168 
2169 	if (status) {
2170 		dev_err(&dev->pdev->dev,
2171 				"response error on GET_STATUS request\n");
2172 		goto stall;
2173 	}
2174 
2175 	list_add_tail(&req->queue, &ep->queue);
2176 	dev->ep0_state = DATA_STATE_XMIT;
2177 
2178 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2179 	return;
2180 stall:
2181 	ep0_stall(dev);
2182 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2183 }
2184 
2185 
2186 /* setup packet interrupt handler */
2187 static void handle_setup_packet(struct langwell_udc *dev,
2188 		struct usb_ctrlrequest *setup)
2189 {
2190 	u16	wValue = le16_to_cpu(setup->wValue);
2191 	u16	wIndex = le16_to_cpu(setup->wIndex);
2192 	u16	wLength = le16_to_cpu(setup->wLength);
2193 	u32	portsc1;
2194 
2195 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2196 
2197 	/* ep0 fifo flush */
2198 	nuke(&dev->ep[0], -ESHUTDOWN);
2199 
2200 	dev_dbg(&dev->pdev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
2201 			setup->bRequestType, setup->bRequest,
2202 			wValue, wIndex, wLength);
2203 
2204 	/* RNDIS gadget delegate */
2205 	if ((setup->bRequestType == 0x21) && (setup->bRequest == 0x00)) {
2206 		/* USB_CDC_SEND_ENCAPSULATED_COMMAND */
2207 		goto delegate;
2208 	}
2209 
2210 	/* USB_CDC_GET_ENCAPSULATED_RESPONSE */
2211 	if ((setup->bRequestType == 0xa1) && (setup->bRequest == 0x01)) {
2212 		/* USB_CDC_GET_ENCAPSULATED_RESPONSE */
2213 		goto delegate;
2214 	}
2215 
2216 	/* We process some standard setup requests here */
2217 	switch (setup->bRequest) {
2218 	case USB_REQ_GET_STATUS:
2219 		dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_GET_STATUS\n");
2220 		/* get status, DATA and STATUS phase */
2221 		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2222 					!= (USB_DIR_IN | USB_TYPE_STANDARD))
2223 			break;
2224 		get_status(dev, setup->bRequestType, wValue, wIndex, wLength);
2225 		goto end;
2226 
2227 	case USB_REQ_SET_ADDRESS:
2228 		dev_dbg(&dev->pdev->dev, "SETUP: USB_REQ_SET_ADDRESS\n");
2229 		/* STATUS phase */
2230 		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
2231 						| USB_RECIP_DEVICE))
2232 			break;
2233 		set_address(dev, wValue, wIndex, wLength);
2234 		goto end;
2235 
2236 	case USB_REQ_CLEAR_FEATURE:
2237 	case USB_REQ_SET_FEATURE:
2238 		/* STATUS phase */
2239 	{
2240 		int rc = -EOPNOTSUPP;
2241 		if (setup->bRequest == USB_REQ_SET_FEATURE)
2242 			dev_dbg(&dev->pdev->dev,
2243 					"SETUP: USB_REQ_SET_FEATURE\n");
2244 		else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
2245 			dev_dbg(&dev->pdev->dev,
2246 					"SETUP: USB_REQ_CLEAR_FEATURE\n");
2247 
2248 		if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
2249 				== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
2250 			struct langwell_ep	*epn;
2251 			epn = get_ep_by_windex(dev, wIndex);
2252 			/* stall if endpoint doesn't exist */
2253 			if (!epn) {
2254 				ep0_stall(dev);
2255 				goto end;
2256 			}
2257 
2258 			if (wValue != 0 || wLength != 0
2259 					|| epn->ep_num > dev->ep_max)
2260 				break;
2261 
2262 			spin_unlock(&dev->lock);
2263 			rc = langwell_ep_set_halt(&epn->ep,
2264 				(setup->bRequest == USB_REQ_SET_FEATURE)
2265 				? 1 : 0);
2266 			spin_lock(&dev->lock);
2267 
2268 		} else if ((setup->bRequestType & (USB_RECIP_MASK
2269 				| USB_TYPE_MASK)) == (USB_RECIP_DEVICE
2270 				| USB_TYPE_STANDARD)) {
2271 			rc = 0;
2272 			switch (wValue) {
2273 			case USB_DEVICE_REMOTE_WAKEUP:
2274 				if (setup->bRequest == USB_REQ_SET_FEATURE) {
2275 					dev->remote_wakeup = 1;
2276 					dev->dev_status |= (1 << wValue);
2277 				} else {
2278 					dev->remote_wakeup = 0;
2279 					dev->dev_status &= ~(1 << wValue);
2280 				}
2281 				break;
2282 			case USB_DEVICE_TEST_MODE:
2283 				dev_dbg(&dev->pdev->dev, "SETUP: TEST MODE\n");
2284 				if ((wIndex & 0xff) ||
2285 					(dev->gadget.speed != USB_SPEED_HIGH))
2286 					ep0_stall(dev);
2287 
2288 				switch (wIndex >> 8) {
2289 				case TEST_J:
2290 				case TEST_K:
2291 				case TEST_SE0_NAK:
2292 				case TEST_PACKET:
2293 				case TEST_FORCE_EN:
2294 					if (prime_status_phase(dev, EP_DIR_IN))
2295 						ep0_stall(dev);
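					/*
					 * copy the test selector from the high
					 * byte of wIndex into the PORTSC1 port
					 * test control field (assumed to be
					 * bits [19:16], as on EHCI-style
					 * controllers)
					 */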
2296 					portsc1 = readl(&dev->op_regs->portsc1);
2297 					portsc1 |= (wIndex & 0xf00) << 8;
2298 					writel(portsc1, &dev->op_regs->portsc1);
2299 					goto end;
2300 				default:
2301 					rc = -EOPNOTSUPP;
2302 				}
2303 				break;
2304 			default:
2305 				rc = -EOPNOTSUPP;
2306 				break;
2307 			}
2308 
2309 			if (!gadget_is_otg(&dev->gadget))
2310 				break;
2311 			else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
2312 				dev->gadget.b_hnp_enable = 1;
2313 			else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
2314 				dev->gadget.a_hnp_support = 1;
2315 			else if (setup->bRequest ==
2316 					USB_DEVICE_A_ALT_HNP_SUPPORT)
2317 				dev->gadget.a_alt_hnp_support = 1;
2318 			else
2319 				break;
2320 		} else
2321 			break;
2322 
2323 		if (rc == 0) {
2324 			if (prime_status_phase(dev, EP_DIR_IN))
2325 				ep0_stall(dev);
2326 		}
2327 		goto end;
2328 	}
2329 
2330 	case USB_REQ_GET_DESCRIPTOR:
2331 		dev_dbg(&dev->pdev->dev,
2332 				"SETUP: USB_REQ_GET_DESCRIPTOR\n");
2333 		goto delegate;
2334 
2335 	case USB_REQ_SET_DESCRIPTOR:
2336 		dev_dbg(&dev->pdev->dev,
2337 				"SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
2338 		goto delegate;
2339 
2340 	case USB_REQ_GET_CONFIGURATION:
2341 		dev_dbg(&dev->pdev->dev,
2342 				"SETUP: USB_REQ_GET_CONFIGURATION\n");
2343 		goto delegate;
2344 
2345 	case USB_REQ_SET_CONFIGURATION:
2346 		dev_dbg(&dev->pdev->dev,
2347 				"SETUP: USB_REQ_SET_CONFIGURATION\n");
2348 		goto delegate;
2349 
2350 	case USB_REQ_GET_INTERFACE:
2351 		dev_dbg(&dev->pdev->dev,
2352 				"SETUP: USB_REQ_GET_INTERFACE\n");
2353 		goto delegate;
2354 
2355 	case USB_REQ_SET_INTERFACE:
2356 		dev_dbg(&dev->pdev->dev,
2357 				"SETUP: USB_REQ_SET_INTERFACE\n");
2358 		goto delegate;
2359 
2360 	case USB_REQ_SYNCH_FRAME:
2361 		dev_dbg(&dev->pdev->dev,
2362 				"SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
2363 		goto delegate;
2364 
2365 	default:
2366 		/* delegate USB standard requests to the gadget driver */
2367 		goto delegate;
2368 delegate:
2369 		/* USB requests handled by gadget */
2370 		if (wLength) {
2371 			/* DATA phase from gadget, STATUS phase from udc */
2372 			dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
2373 					?  USB_DIR_IN : USB_DIR_OUT;
2374 			dev_vdbg(&dev->pdev->dev,
2375 					"dev->ep0_dir = 0x%x, wLength = %d\n",
2376 					dev->ep0_dir, wLength);
2377 			spin_unlock(&dev->lock);
2378 			if (dev->driver->setup(&dev->gadget,
2379 					&dev->local_setup_buff) < 0)
2380 				ep0_stall(dev);
2381 			spin_lock(&dev->lock);
2382 			dev->ep0_state = (setup->bRequestType & USB_DIR_IN)
2383 					?  DATA_STATE_XMIT : DATA_STATE_RECV;
2384 		} else {
2385 			/* no DATA phase, IN STATUS phase from gadget */
2386 			dev->ep0_dir = USB_DIR_IN;
2387 			dev_vdbg(&dev->pdev->dev,
2388 					"dev->ep0_dir = 0x%x, wLength = %d\n",
2389 					dev->ep0_dir, wLength);
2390 			spin_unlock(&dev->lock);
2391 			if (dev->driver->setup(&dev->gadget,
2392 					&dev->local_setup_buff) < 0)
2393 				ep0_stall(dev);
2394 			spin_lock(&dev->lock);
2395 			dev->ep0_state = WAIT_FOR_OUT_STATUS;
2396 		}
2397 		break;
2398 	}
2399 end:
2400 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2401 }
2402 
2403 
2404 /* transfer completion: process the endpoint request and free the completed
2405  * dTDs for this request
2406  */
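/*
 * Walk the request's dTD chain: a dTD with a clear status field and no
 * remaining bytes completed normally; otherwise the status reports ACTIVE
 * (still owned by hardware), HALTED, a data buffer error or a transaction
 * error.  The actual byte count is the request length minus whatever the
 * dTDs left untransferred.
 */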
2407 static int process_ep_req(struct langwell_udc *dev, int index,
2408 		struct langwell_request *curr_req)
2409 {
2410 	struct langwell_dtd	*curr_dtd;
2411 	struct langwell_dqh	*curr_dqh;
2412 	int			td_complete, actual, remaining_length;
2413 	int			i, dir;
2414 	u8			dtd_status = 0;
2415 	int			retval = 0;
2416 
2417 	curr_dqh = &dev->ep_dqh[index];
2418 	dir = index % 2;
2419 
2420 	curr_dtd = curr_req->head;
2421 	td_complete = 0;
2422 	actual = curr_req->req.length;
2423 
2424 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2425 
2426 	for (i = 0; i < curr_req->dtd_count; i++) {
2427 
2428 		/* command execution states by dTD */
2429 		dtd_status = curr_dtd->dtd_status;
2430 
2431 		barrier();
2432 		remaining_length = le16_to_cpu(curr_dtd->dtd_total);
2433 		actual -= remaining_length;
2434 
2435 		if (!dtd_status) {
2436 			/* transfers completed successfully */
2437 			if (!remaining_length) {
2438 				td_complete++;
2439 				dev_vdbg(&dev->pdev->dev,
2440 					"dTD transmitted successfully\n");
2441 			} else {
2442 				if (dir) {
2443 					dev_vdbg(&dev->pdev->dev,
2444 						"TX dTD has remaining data\n");
2445 					retval = -EPROTO;
2446 					break;
2447 
2448 				} else {
2449 					td_complete++;
2450 					break;
2451 				}
2452 			}
2453 		} else {
2454 			/* transfers completed with errors */
2455 			if (dtd_status & DTD_STS_ACTIVE) {
2456 				dev_dbg(&dev->pdev->dev,
2457 					"dTD status ACTIVE dQH[%d]\n", index);
2458 				retval = 1;
2459 				return retval;
2460 			} else if (dtd_status & DTD_STS_HALTED) {
2461 				dev_err(&dev->pdev->dev,
2462 					"dTD error %08x dQH[%d]\n",
2463 					dtd_status, index);
2464 				/* clear the errors and halt condition */
2465 				curr_dqh->dtd_status = 0;
2466 				retval = -EPIPE;
2467 				break;
2468 			} else if (dtd_status & DTD_STS_DBE) {
2469 				dev_dbg(&dev->pdev->dev,
2470 					"data buffer (overflow) error\n");
2471 				retval = -EPROTO;
2472 				break;
2473 			} else if (dtd_status & DTD_STS_TRE) {
2474 				dev_dbg(&dev->pdev->dev,
2475 					"transaction(ISO) error\n");
2476 				retval = -EILSEQ;
2477 				break;
2478 			} else
2479 				dev_err(&dev->pdev->dev,
2480 					"unknown error (0x%x)!\n",
2481 					dtd_status);
2482 		}
2483 
2484 		if (i != curr_req->dtd_count - 1)
2485 			curr_dtd = (struct langwell_dtd *)
2486 				curr_dtd->next_dtd_virt;
2487 	}
2488 
2489 	if (retval)
2490 		return retval;
2491 
2492 	curr_req->req.actual = actual;
2493 
2494 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2495 	return 0;
2496 }
2497 
2498 
2499 /* complete the DATA or STATUS phase of ep0; prime a status phase if needed */
2500 static void ep0_req_complete(struct langwell_udc *dev,
2501 		struct langwell_ep *ep0, struct langwell_request *req)
2502 {
2503 	u32	new_addr;
2504 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2505 
2506 	if (dev->usb_state == USB_STATE_ADDRESS) {
2507 		/* set the new address */
2508 		new_addr = (u32)dev->dev_addr;
2509 		writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
2510 
2511 		new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
2512 		dev_vdbg(&dev->pdev->dev, "new_addr = %d\n", new_addr);
2513 	}
2514 
2515 	done(ep0, req, 0);
2516 
2517 	switch (dev->ep0_state) {
2518 	case DATA_STATE_XMIT:
2519 		/* receive status phase */
2520 		if (prime_status_phase(dev, EP_DIR_OUT))
2521 			ep0_stall(dev);
2522 		break;
2523 	case DATA_STATE_RECV:
2524 		/* send status phase */
2525 		if (prime_status_phase(dev, EP_DIR_IN))
2526 			ep0_stall(dev);
2527 		break;
2528 	case WAIT_FOR_OUT_STATUS:
2529 		dev->ep0_state = WAIT_FOR_SETUP;
2530 		break;
2531 	case WAIT_FOR_SETUP:
2532 		dev_err(&dev->pdev->dev, "unexpected ep0 packets\n");
2533 		break;
2534 	default:
2535 		ep0_stall(dev);
2536 		break;
2537 	}
2538 
2539 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2540 }
2541 
2542 
2543 /* USB transfer completion interrupt */
2544 static void handle_trans_complete(struct langwell_udc *dev)
2545 {
2546 	u32			complete_bits;
2547 	int			i, ep_num, dir, bit_mask, status;
2548 	struct langwell_ep	*epn;
2549 	struct langwell_request	*curr_req, *temp_req;
2550 
2551 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2552 
2553 	complete_bits = readl(&dev->op_regs->endptcomplete);
2554 	dev_vdbg(&dev->pdev->dev, "endptcomplete register: 0x%08x\n",
2555 			complete_bits);
2556 
2557 	/* Write-Clear the bits in endptcomplete register */
2558 	writel(complete_bits, &dev->op_regs->endptcomplete);
2559 
2560 	if (!complete_bits) {
2561 		dev_dbg(&dev->pdev->dev, "complete_bits = 0\n");
2562 		goto done;
2563 	}
2564 
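	/*
	 * dQH/dTD index i maps to endpoint i/2 with direction i%2 (even = OUT,
	 * odd = IN).  In endptcomplete the RX (OUT) bits sit in the low
	 * half-word and the TX (IN) bits in the high half-word, hence the
	 * (ep_num + 16 * dir) bit mask below.
	 */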
2565 	for (i = 0; i < dev->ep_max; i++) {
2566 		ep_num = i / 2;
2567 		dir = i % 2;
2568 
2569 		bit_mask = 1 << (ep_num + 16 * dir);
2570 
2571 		if (!(complete_bits & bit_mask))
2572 			continue;
2573 
2574 		/* ep0 */
2575 		if (i == 1)
2576 			epn = &dev->ep[0];
2577 		else
2578 			epn = &dev->ep[i];
2579 
2580 		if (epn->name == NULL) {
2581 			dev_warn(&dev->pdev->dev, "invalid endpoint\n");
2582 			continue;
2583 		}
2584 
2585 		if (i < 2)
2586 			/* ep0 in and out */
2587 			dev_dbg(&dev->pdev->dev, "%s-%s transfer completed\n",
2588 					epn->name,
2589 					is_in(epn) ? "in" : "out");
2590 		else
2591 			dev_dbg(&dev->pdev->dev, "%s transfer completed\n",
2592 					epn->name);
2593 
2594 		/* process the req queue until an incomplete request */
2595 		list_for_each_entry_safe(curr_req, temp_req,
2596 				&epn->queue, queue) {
2597 			status = process_ep_req(dev, i, curr_req);
2598 			dev_vdbg(&dev->pdev->dev, "%s req status: %d\n",
2599 					epn->name, status);
2600 
2601 			if (status)
2602 				break;
2603 
2604 			/* write back status to req */
2605 			curr_req->req.status = status;
2606 
2607 			/* ep0 request completion */
2608 			if (ep_num == 0) {
2609 				ep0_req_complete(dev, epn, curr_req);
2610 				break;
2611 			} else {
2612 				done(epn, curr_req, status);
2613 			}
2614 		}
2615 	}
2616 done:
2617 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2618 }
2619 
2620 /* port change detect interrupt handler */
2621 static void handle_port_change(struct langwell_udc *dev)
2622 {
2623 	u32	portsc1, devlc;
2624 
2625 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2626 
2627 	if (dev->bus_reset)
2628 		dev->bus_reset = 0;
2629 
2630 	portsc1 = readl(&dev->op_regs->portsc1);
2631 	devlc = readl(&dev->op_regs->devlc);
2632 	dev_vdbg(&dev->pdev->dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
2633 			portsc1, devlc);
2634 
2635 	/* bus reset is finished */
2636 	if (!(portsc1 & PORTS_PR)) {
2637 		/* get the speed */
2638 		dev->gadget.speed = lpm_device_speed(devlc);
2639 		dev_vdbg(&dev->pdev->dev, "dev->gadget.speed = %d\n",
2640 			dev->gadget.speed);
2641 	}
2642 
2643 	/* LPM L0 to L1 */
2644 	if (dev->lpm && dev->lpm_state == LPM_L0)
2645 		if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
2646 			dev_info(&dev->pdev->dev, "LPM L0 to L1\n");
2647 			dev->lpm_state = LPM_L1;
2648 		}
2649 
2650 	/* LPM L1 to L0, force resume or remote wakeup finished */
2651 	if (dev->lpm && dev->lpm_state == LPM_L1)
2652 		if (!(portsc1 & PORTS_SUSP)) {
2653 			dev_info(&dev->pdev->dev, "LPM L1 to L0\n");
2654 			dev->lpm_state = LPM_L0;
2655 		}
2656 
2657 	/* update USB state */
2658 	if (!dev->resume_state)
2659 		dev->usb_state = USB_STATE_DEFAULT;
2660 
2661 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2662 }
2663 
2664 
2665 /* USB reset interrupt handler */
2666 static void handle_usb_reset(struct langwell_udc *dev)
2667 {
2668 	u32		deviceaddr,
2669 			endptsetupstat,
2670 			endptcomplete;
2671 	unsigned long	timeout;
2672 
2673 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2674 
2675 	/* Write-Clear the device address */
2676 	deviceaddr = readl(&dev->op_regs->deviceaddr);
2677 	writel(deviceaddr & ~USBADR_MASK, &dev->op_regs->deviceaddr);
2678 
2679 	dev->dev_addr = 0;
2680 
2681 	/* clear usb state */
2682 	dev->resume_state = 0;
2683 
2684 	/* LPM L1 to L0, reset */
2685 	if (dev->lpm)
2686 		dev->lpm_state = LPM_L0;
2687 
2688 	dev->ep0_dir = USB_DIR_OUT;
2689 	dev->ep0_state = WAIT_FOR_SETUP;
2690 
2691 	/* remote wakeup reset to 0 when the device is reset */
2692 	dev->remote_wakeup = 0;
2693 	dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
2694 	dev->gadget.b_hnp_enable = 0;
2695 	dev->gadget.a_hnp_support = 0;
2696 	dev->gadget.a_alt_hnp_support = 0;
2697 
2698 	/* Write-Clear all the setup token semaphores */
2699 	endptsetupstat = readl(&dev->op_regs->endptsetupstat);
2700 	writel(endptsetupstat, &dev->op_regs->endptsetupstat);
2701 
2702 	/* Write-Clear all the endpoint complete status bits */
2703 	endptcomplete = readl(&dev->op_regs->endptcomplete);
2704 	writel(endptcomplete, &dev->op_regs->endptcomplete);
2705 
2706 	/* wait until all endptprime bits cleared */
2707 	timeout = jiffies + PRIME_TIMEOUT;
2708 	while (readl(&dev->op_regs->endptprime)) {
2709 		if (time_after(jiffies, timeout)) {
2710 			dev_err(&dev->pdev->dev, "USB reset timeout\n");
2711 			break;
2712 		}
2713 		cpu_relax();
2714 	}
2715 
2716 	/* write 1s to endptflush register to clear any primed buffers */
2717 	writel((u32) ~0, &dev->op_regs->endptflush);
2718 
2719 	if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
2720 		dev_vdbg(&dev->pdev->dev, "USB bus reset\n");
2721 		/* bus is resetting */
2722 		dev->bus_reset = 1;
2723 
2724 		/* reset all the queues, stop all USB activities */
2725 		stop_activity(dev);
2726 		dev->usb_state = USB_STATE_DEFAULT;
2727 	} else {
2728 		dev_vdbg(&dev->pdev->dev, "device controller reset\n");
2729 		/* controller reset */
2730 		langwell_udc_reset(dev);
2731 
2732 		/* reset all the queues, stop all USB activities */
2733 		stop_activity(dev);
2734 
2735 		/* reset ep0 dQH and endptctrl */
2736 		ep0_reset(dev);
2737 
2738 		/* enable interrupt and set controller to run state */
2739 		langwell_udc_start(dev);
2740 
2741 		dev->usb_state = USB_STATE_ATTACHED;
2742 	}
2743 
2744 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2745 }
2746 
2747 
2748 /* USB bus suspend/resume interrupt */
2749 static void handle_bus_suspend(struct langwell_udc *dev)
2750 {
2751 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2752 
2753 	dev->resume_state = dev->usb_state;
2754 	dev->usb_state = USB_STATE_SUSPENDED;
2755 
2756 	/* report suspend to the driver */
2757 	if (dev->driver) {
2758 		if (dev->driver->suspend) {
2759 			spin_unlock(&dev->lock);
2760 			dev->driver->suspend(&dev->gadget);
2761 			spin_lock(&dev->lock);
2762 			dev_dbg(&dev->pdev->dev, "suspend %s\n",
2763 					dev->driver->driver.name);
2764 		}
2765 	}
2766 
2767 	/* enter PHY low power suspend */
2768 	if (dev->pdev->device != 0x0829)
2769 		langwell_phy_low_power(dev, 1);
2770 
2771 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2772 }
2773 
2774 
2775 static void handle_bus_resume(struct langwell_udc *dev)
2776 {
2777 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2778 
2779 	dev->usb_state = dev->resume_state;
2780 	dev->resume_state = 0;
2781 
2782 	/* exit PHY low power suspend */
2783 	if (dev->pdev->device != 0x0829)
2784 		langwell_phy_low_power(dev, 0);
2785 
2786 	/* report resume to the driver */
2787 	if (dev->driver) {
2788 		if (dev->driver->resume) {
2789 			spin_unlock(&dev->lock);
2790 			dev->driver->resume(&dev->gadget);
2791 			spin_lock(&dev->lock);
2792 			dev_dbg(&dev->pdev->dev, "resume %s\n",
2793 					dev->driver->driver.name);
2794 		}
2795 	}
2796 
2797 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2798 }
2799 
2800 
2801 /* USB device controller interrupt handler */
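/*
 * The handler acknowledges only the enabled status bits, then dispatches in
 * order: resume detection via portsc1, SETUP packets and transfer
 * completions (UI), port change (PCI), suspend (SLI) and USB reset (URI);
 * USB/system error bits are currently only logged.
 */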
2802 static irqreturn_t langwell_irq(int irq, void *_dev)
2803 {
2804 	struct langwell_udc	*dev = _dev;
2805 	u32			usbsts,
2806 				usbintr,
2807 				irq_sts,
2808 				portsc1;
2809 
2810 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
2811 
2812 	if (dev->stopped) {
2813 		dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
2814 		dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2815 		return IRQ_NONE;
2816 	}
2817 
2818 	spin_lock(&dev->lock);
2819 
2820 	/* USB status */
2821 	usbsts = readl(&dev->op_regs->usbsts);
2822 
2823 	/* USB interrupt enable */
2824 	usbintr = readl(&dev->op_regs->usbintr);
2825 
2826 	irq_sts = usbsts & usbintr;
2827 	dev_vdbg(&dev->pdev->dev,
2828 			"usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
2829 			usbsts, usbintr, irq_sts);
2830 
2831 	if (!irq_sts) {
2832 		dev_vdbg(&dev->pdev->dev, "handle IRQ_NONE\n");
2833 		dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2834 		spin_unlock(&dev->lock);
2835 		return IRQ_NONE;
2836 	}
2837 
2838 	/* Write-Clear interrupt status bits */
2839 	writel(irq_sts, &dev->op_regs->usbsts);
2840 
2841 	/* resume from suspend */
2842 	portsc1 = readl(&dev->op_regs->portsc1);
2843 	if (dev->usb_state == USB_STATE_SUSPENDED)
2844 		if (!(portsc1 & PORTS_SUSP))
2845 			handle_bus_resume(dev);
2846 
2847 	/* USB interrupt */
2848 	if (irq_sts & STS_UI) {
2849 		dev_vdbg(&dev->pdev->dev, "USB interrupt\n");
2850 
2851 		/* setup packet received from ep0 */
2852 		if (readl(&dev->op_regs->endptsetupstat)
2853 				& EP0SETUPSTAT_MASK) {
2854 			dev_vdbg(&dev->pdev->dev,
2855 				"USB SETUP packet received interrupt\n");
2856 			/* setup tripwire semaphore */
2857 			setup_tripwire(dev);
2858 			handle_setup_packet(dev, &dev->local_setup_buff);
2859 		}
2860 
2861 		/* USB transfer completion */
2862 		if (readl(&dev->op_regs->endptcomplete)) {
2863 			dev_vdbg(&dev->pdev->dev,
2864 				"USB transfer completion interrupt\n");
2865 			handle_trans_complete(dev);
2866 		}
2867 	}
2868 
2869 	/* SOF received interrupt (for ISO transfer) */
2870 	if (irq_sts & STS_SRI) {
2871 		/* FIXME */
2872 		/* dev_vdbg(&dev->pdev->dev, "SOF received interrupt\n"); */
2873 	}
2874 
2875 	/* port change detect interrupt */
2876 	if (irq_sts & STS_PCI) {
2877 		dev_vdbg(&dev->pdev->dev, "port change detect interrupt\n");
2878 		handle_port_change(dev);
2879 	}
2880 
2881 	/* suspend interrupt */
2882 	if (irq_sts & STS_SLI) {
2883 		dev_vdbg(&dev->pdev->dev, "suspend interrupt\n");
2884 		handle_bus_suspend(dev);
2885 	}
2886 
2887 	/* USB reset interrupt */
2888 	if (irq_sts & STS_URI) {
2889 		dev_vdbg(&dev->pdev->dev, "USB reset interrupt\n");
2890 		handle_usb_reset(dev);
2891 	}
2892 
2893 	/* USB error or system error interrupt */
2894 	if (irq_sts & (STS_UEI | STS_SEI)) {
2895 		/* FIXME */
2896 		dev_warn(&dev->pdev->dev, "error IRQ, irq_sts: %x\n", irq_sts);
2897 	}
2898 
2899 	spin_unlock(&dev->lock);
2900 
2901 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2902 	return IRQ_HANDLED;
2903 }
2904 
2905 
2906 /*-------------------------------------------------------------------------*/
2907 
2908 /* release device structure */
2909 static void gadget_release(struct device *_dev)
2910 {
2911 	struct langwell_udc	*dev = dev_get_drvdata(_dev);
2912 
2913 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2914 
2915 	complete(dev->done);
2916 
2917 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2918 	kfree(dev);
2919 }
2920 
2921 
2922 /* enable SRAM caching if SRAM detected */
2923 static void sram_init(struct langwell_udc *dev)
2924 {
2925 	struct pci_dev		*pdev = dev->pdev;
2926 
2927 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2928 
2929 	dev->sram_addr = pci_resource_start(pdev, 1);
2930 	dev->sram_size = pci_resource_len(pdev, 1);
2931 	dev_info(&dev->pdev->dev, "Found private SRAM at %x size:%x\n",
2932 			dev->sram_addr, dev->sram_size);
2933 	dev->got_sram = 1;
2934 
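	/*
	 * Declaring the SRAM window as device-coherent memory lets later
	 * coherent DMA allocations for this device (dQH array, dTD pool) be
	 * satisfied from the on-chip SRAM instead of ordinary system RAM.
	 */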
2935 	if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
2936 		dev_warn(&dev->pdev->dev, "SRAM request failed\n");
2937 		dev->got_sram = 0;
2938 	} else if (!dma_declare_coherent_memory(&pdev->dev, dev->sram_addr,
2939 			dev->sram_addr, dev->sram_size, DMA_MEMORY_MAP)) {
2940 		dev_warn(&dev->pdev->dev, "SRAM DMA declare failed\n");
2941 		pci_release_region(pdev, 1);
2942 		dev->got_sram = 0;
2943 	}
2944 
2945 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2946 }
2947 
2948 
2949 /* release SRAM caching */
2950 static void sram_deinit(struct langwell_udc *dev)
2951 {
2952 	struct pci_dev *pdev = dev->pdev;
2953 
2954 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2955 
2956 	dma_release_declared_memory(&pdev->dev);
2957 	pci_release_region(pdev, 1);
2958 
2959 	dev->got_sram = 0;
2960 
2961 	dev_info(&dev->pdev->dev, "release SRAM caching\n");
2962 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
2963 }
2964 
2965 
2966 /* tear down the binding between this driver and the pci device */
2967 static void langwell_udc_remove(struct pci_dev *pdev)
2968 {
2969 	struct langwell_udc	*dev = pci_get_drvdata(pdev);
2970 
2971 	DECLARE_COMPLETION(done);
2972 
2973 	BUG_ON(dev->driver);
2974 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
2975 
2976 	dev->done = &done;
2977 
2978 	/* free dTD dma_pool and dQH */
2979 	if (dev->dtd_pool)
2980 		dma_pool_destroy(dev->dtd_pool);
2981 
2982 	if (dev->ep_dqh)
2983 		dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
2984 			dev->ep_dqh, dev->ep_dqh_dma);
2985 
2986 	/* release SRAM caching */
2987 	if (dev->has_sram && dev->got_sram)
2988 		sram_deinit(dev);
2989 
2990 	if (dev->status_req) {
2991 		kfree(dev->status_req->req.buf);
2992 		kfree(dev->status_req);
2993 	}
2994 
2995 	kfree(dev->ep);
2996 
2997 	/* disable IRQ handler */
2998 	if (dev->got_irq)
2999 		free_irq(pdev->irq, dev);
3000 
3001 	if (dev->cap_regs)
3002 		iounmap(dev->cap_regs);
3003 
3004 	if (dev->region)
3005 		release_mem_region(pci_resource_start(pdev, 0),
3006 				pci_resource_len(pdev, 0));
3007 
3008 	if (dev->enabled)
3009 		pci_disable_device(pdev);
3010 
3011 	dev->cap_regs = NULL;
3012 
3013 	dev_info(&dev->pdev->dev, "unbind\n");
3014 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3015 
3016 	device_unregister(&dev->gadget.dev);
3017 	device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
3018 	device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
3019 
3020 	pci_set_drvdata(pdev, NULL);
3021 
3022 	/* free dev, wait for the release() finished */
3023 	wait_for_completion(&done);
3024 }
3025 
3026 
3027 /*
3028  * wrap this driver around the specified device, but
3029  * don't respond over USB until a gadget driver binds to us.
3030  */
3031 static int langwell_udc_probe(struct pci_dev *pdev,
3032 		const struct pci_device_id *id)
3033 {
3034 	struct langwell_udc	*dev;
3035 	unsigned long		resource, len;
3036 	void			__iomem *base = NULL;
3037 	size_t			size;
3038 	int			retval;
3039 
3040 	/* alloc, and start init */
3041 	dev = kzalloc(sizeof *dev, GFP_KERNEL);
3042 	if (dev == NULL) {
3043 		retval = -ENOMEM;
3044 		goto error;
3045 	}
3046 
3047 	/* initialize device spinlock */
3048 	spin_lock_init(&dev->lock);
3049 
3050 	dev->pdev = pdev;
3051 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3052 
3053 	pci_set_drvdata(pdev, dev);
3054 
3055 	/* now all the pci goodies ... */
3056 	if (pci_enable_device(pdev) < 0) {
3057 		retval = -ENODEV;
3058 		goto error;
3059 	}
3060 	dev->enabled = 1;
3061 
3062 	/* control register: BAR 0 */
3063 	resource = pci_resource_start(pdev, 0);
3064 	len = pci_resource_len(pdev, 0);
3065 	if (!request_mem_region(resource, len, driver_name)) {
3066 		dev_err(&dev->pdev->dev, "controller already in use\n");
3067 		retval = -EBUSY;
3068 		goto error;
3069 	}
3070 	dev->region = 1;
3071 
3072 	base = ioremap_nocache(resource, len);
3073 	if (base == NULL) {
3074 		dev_err(&dev->pdev->dev, "can't map memory\n");
3075 		retval = -EFAULT;
3076 		goto error;
3077 	}
3078 
3079 	dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
3080 	dev_vdbg(&dev->pdev->dev, "dev->cap_regs: %p\n", dev->cap_regs);
3081 	dev->op_regs = (struct langwell_op_regs __iomem *)
3082 		(base + OP_REG_OFFSET);
3083 	dev_vdbg(&dev->pdev->dev, "dev->op_regs: %p\n", dev->op_regs);
3084 
3085 	/* irq setup after old hardware is cleaned up */
3086 	if (!pdev->irq) {
3087 		dev_err(&dev->pdev->dev, "No IRQ. Check PCI setup!\n");
3088 		retval = -ENODEV;
3089 		goto error;
3090 	}
3091 
3092 	dev->has_sram = 1;
3093 	dev->got_sram = 0;
3094 	dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram);
3095 
3096 	/* enable SRAM caching if detected */
3097 	if (dev->has_sram && !dev->got_sram)
3098 		sram_init(dev);
3099 
3100 	dev_info(&dev->pdev->dev,
3101 			"irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
3102 			pdev->irq, resource, len, base);
3103 	/* enables bus-mastering for device dev */
3104 	pci_set_master(pdev);
3105 
3106 	if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3107 				driver_name, dev) != 0) {
3108 		dev_err(&dev->pdev->dev,
3109 				"request interrupt %d failed\n", pdev->irq);
3110 		retval = -EBUSY;
3111 		goto error;
3112 	}
3113 	dev->got_irq = 1;
3114 
3115 	/* set stopped bit */
3116 	dev->stopped = 1;
3117 
3118 	/* capabilities and endpoint number */
3119 	dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
3120 	dev->dciversion = readw(&dev->cap_regs->dciversion);
3121 	dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
3122 	dev_vdbg(&dev->pdev->dev, "dev->lpm: %d\n", dev->lpm);
3123 	dev_vdbg(&dev->pdev->dev, "dev->dciversion: 0x%04x\n",
3124 			dev->dciversion);
3125 	dev_vdbg(&dev->pdev->dev, "dccparams: 0x%08x\n",
3126 			readl(&dev->cap_regs->dccparams));
3127 	dev_vdbg(&dev->pdev->dev, "dev->devcap: %d\n", dev->devcap);
3128 	if (!dev->devcap) {
3129 		dev_err(&dev->pdev->dev, "can't support device mode\n");
3130 		retval = -ENODEV;
3131 		goto error;
3132 	}
3133 
3134 	/* a pair of endpoints (out/in) for each address */
3135 	dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
3136 	dev_vdbg(&dev->pdev->dev, "dev->ep_max: %d\n", dev->ep_max);
3137 
3138 	/* allocate endpoints memory */
3139 	dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
3140 			GFP_KERNEL);
3141 	if (!dev->ep) {
3142 		dev_err(&dev->pdev->dev, "allocate endpoints memory failed\n");
3143 		retval = -ENOMEM;
3144 		goto error;
3145 	}
3146 
3147 	/* allocate device dQH memory */
3148 	size = dev->ep_max * sizeof(struct langwell_dqh);
3149 	dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
3150 	if (size < DQH_ALIGNMENT)
3151 		size = DQH_ALIGNMENT;
3152 	else if ((size % DQH_ALIGNMENT) != 0) {
3153 		size += DQH_ALIGNMENT + 1;
3154 		size &= ~(DQH_ALIGNMENT - 1);
3155 	}
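	/* size is now rounded up to a whole multiple of DQH_ALIGNMENT so the
	 * controller sees a properly aligned queue head array */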
3156 	dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3157 					&dev->ep_dqh_dma, GFP_KERNEL);
3158 	if (!dev->ep_dqh) {
3159 		dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
3160 		retval = -ENOMEM;
3161 		goto error;
3162 	}
3163 	dev->ep_dqh_size = size;
3164 	dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
3165 
3166 	/* initialize ep0 status request structure */
3167 	dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
3168 	if (!dev->status_req) {
3169 		dev_err(&dev->pdev->dev,
3170 				"allocate status_req memory failed\n");
3171 		retval = -ENOMEM;
3172 		goto error;
3173 	}
3174 	INIT_LIST_HEAD(&dev->status_req->queue);
3175 
3176 	/* allocate a small amount of memory to get a valid address */
3177 	dev->status_req->req.buf = kmalloc(8, GFP_KERNEL);
3178 	dev->status_req->req.dma = virt_to_phys(dev->status_req->req.buf);
3179 
3180 	dev->resume_state = USB_STATE_NOTATTACHED;
3181 	dev->usb_state = USB_STATE_POWERED;
3182 	dev->ep0_dir = USB_DIR_OUT;
3183 
3184 	/* remote wakeup reset to 0 when the device is reset */
3185 	dev->remote_wakeup = 0;
3186 	dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
3187 
3188 	/* reset device controller */
3189 	langwell_udc_reset(dev);
3190 
3191 	/* initialize gadget structure */
3192 	dev->gadget.ops = &langwell_ops;	/* usb_gadget_ops */
3193 	dev->gadget.ep0 = &dev->ep[0].ep;	/* gadget ep0 */
3194 	INIT_LIST_HEAD(&dev->gadget.ep_list);	/* ep_list */
3195 	dev->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
3196 	dev->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */
3197 
3198 	/* the "gadget" abstracts/virtualizes the controller */
3199 	dev_set_name(&dev->gadget.dev, "gadget");
3200 	dev->gadget.dev.parent = &pdev->dev;
3201 	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3202 	dev->gadget.dev.release = gadget_release;
3203 	dev->gadget.name = driver_name;		/* gadget name */
3204 
3205 	/* controller endpoints reinit */
3206 	eps_reinit(dev);
3207 
3208 	/* reset ep0 dQH and endptctrl */
3209 	ep0_reset(dev);
3210 
3211 	/* create dTD dma_pool resource */
3212 	dev->dtd_pool = dma_pool_create("langwell_dtd",
3213 			&dev->pdev->dev,
3214 			sizeof(struct langwell_dtd),
3215 			DTD_ALIGNMENT,
3216 			DMA_BOUNDARY);
3217 
3218 	if (!dev->dtd_pool) {
3219 		retval = -ENOMEM;
3220 		goto error;
3221 	}
3222 
3223 	/* done */
3224 	dev_info(&dev->pdev->dev, "%s\n", driver_desc);
3225 	dev_info(&dev->pdev->dev, "irq %d, pci mem %p\n", pdev->irq, base);
3226 	dev_info(&dev->pdev->dev, "Driver version: " DRIVER_VERSION "\n");
3227 	dev_info(&dev->pdev->dev, "Support (max) %d endpoints\n", dev->ep_max);
3228 	dev_info(&dev->pdev->dev, "Device interface version: 0x%04x\n",
3229 			dev->dciversion);
3230 	dev_info(&dev->pdev->dev, "Controller mode: %s\n",
3231 			dev->devcap ? "Device" : "Host");
3232 	dev_info(&dev->pdev->dev, "Support USB LPM: %s\n",
3233 			dev->lpm ? "Yes" : "No");
3234 
3235 	dev_vdbg(&dev->pdev->dev,
3236 			"After langwell_udc_probe(), print all registers:\n");
3237 	print_all_registers(dev);
3238 
3239 	retval = device_register(&dev->gadget.dev);
3240 	if (retval)
3241 		goto error;
3242 
3243 	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3244 	if (retval)
3245 		goto error;
3246 
3247 	retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
3248 	if (retval)
3249 		goto error;
3250 
3251 	retval = device_create_file(&pdev->dev, &dev_attr_remote_wakeup);
3252 	if (retval)
3253 		goto error_attr1;
3254 
3255 	dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3256 	return 0;
3257 
3258 error_attr1:
3259 	device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
3260 error:
3261 	if (dev) {
3262 		dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3263 		langwell_udc_remove(pdev);
3264 	}
3265 
3266 	return retval;
3267 }
3268 
3269 
3270 /* device controller suspend */
3271 static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
3272 {
3273 	struct langwell_udc	*dev = pci_get_drvdata(pdev);
3274 
3275 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3276 
3277 	usb_del_gadget_udc(&dev->gadget);
3278 	/* disable interrupt and set controller to stop state */
3279 	langwell_udc_stop(dev);
3280 
3281 	/* disable IRQ handler */
3282 	if (dev->got_irq)
3283 		free_irq(pdev->irq, dev);
3284 	dev->got_irq = 0;
3285 
3286 	/* save PCI state */
3287 	pci_save_state(pdev);
3288 
3289 	spin_lock_irq(&dev->lock);
3290 	/* stop all usb activities */
3291 	stop_activity(dev);
3292 	spin_unlock_irq(&dev->lock);
3293 
3294 	/* free dTD dma_pool and dQH */
3295 	if (dev->dtd_pool)
3296 		dma_pool_destroy(dev->dtd_pool);
3297 
3298 	if (dev->ep_dqh)
3299 		dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
3300 			dev->ep_dqh, dev->ep_dqh_dma);
3301 
3302 	/* release SRAM caching */
3303 	if (dev->has_sram && dev->got_sram)
3304 		sram_deinit(dev);
3305 
3306 	/* set device power state */
3307 	pci_set_power_state(pdev, PCI_D3hot);
3308 
3309 	/* enter PHY low power suspend */
3310 	if (dev->pdev->device != 0x0829)
3311 		langwell_phy_low_power(dev, 1);
3312 
3313 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3314 	return 0;
3315 }
3316 
3317 
3318 /* device controller resume */
3319 static int langwell_udc_resume(struct pci_dev *pdev)
3320 {
3321 	struct langwell_udc	*dev = pci_get_drvdata(pdev);
3322 	size_t			size;
3323 
3324 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3325 
3326 	/* exit PHY low power suspend */
3327 	if (dev->pdev->device != 0x0829)
3328 		langwell_phy_low_power(dev, 0);
3329 
3330 	/* set device D0 power state */
3331 	pci_set_power_state(pdev, PCI_D0);
3332 
3333 	/* enable SRAM caching if detected */
3334 	if (dev->has_sram && !dev->got_sram)
3335 		sram_init(dev);
3336 
3337 	/* allocate device dQH memory */
3338 	size = dev->ep_max * sizeof(struct langwell_dqh);
3339 	dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
3340 	if (size < DQH_ALIGNMENT)
3341 		size = DQH_ALIGNMENT;
3342 	else if ((size % DQH_ALIGNMENT) != 0) {
3343 		size += DQH_ALIGNMENT + 1;
3344 		size &= ~(DQH_ALIGNMENT - 1);
3345 	}
3346 	dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3347 					&dev->ep_dqh_dma, GFP_KERNEL);
3348 	if (!dev->ep_dqh) {
3349 		dev_err(&dev->pdev->dev, "allocate dQH memory failed\n");
3350 		return -ENOMEM;
3351 	}
3352 	dev->ep_dqh_size = size;
3353 	dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
3354 
3355 	/* create dTD dma_pool resource */
3356 	dev->dtd_pool = dma_pool_create("langwell_dtd",
3357 			&dev->pdev->dev,
3358 			sizeof(struct langwell_dtd),
3359 			DTD_ALIGNMENT,
3360 			DMA_BOUNDARY);
3361 
3362 	if (!dev->dtd_pool)
3363 		return -ENOMEM;
3364 
3365 	/* restore PCI state */
3366 	pci_restore_state(pdev);
3367 
3368 	/* enable IRQ handler */
3369 	if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3370 				driver_name, dev) != 0) {
3371 		dev_err(&dev->pdev->dev, "request interrupt %d failed\n",
3372 				pdev->irq);
3373 		return -EBUSY;
3374 	}
3375 	dev->got_irq = 1;
3376 
3377 	/* reset and start controller to run state */
3378 	if (dev->stopped) {
3379 		/* reset device controller */
3380 		langwell_udc_reset(dev);
3381 
3382 		/* reset ep0 dQH and endptctrl */
3383 		ep0_reset(dev);
3384 
3385 		/* start device if gadget is loaded */
3386 		if (dev->driver)
3387 			langwell_udc_start(dev);
3388 	}
3389 
3390 	/* reset USB status */
3391 	dev->usb_state = USB_STATE_ATTACHED;
3392 	dev->ep0_state = WAIT_FOR_SETUP;
3393 	dev->ep0_dir = USB_DIR_OUT;
3394 
3395 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3396 	return 0;
3397 }
3398 
3399 
3400 /* pci driver shutdown */
3401 static void langwell_udc_shutdown(struct pci_dev *pdev)
3402 {
3403 	struct langwell_udc	*dev = pci_get_drvdata(pdev);
3404 	u32			usbmode;
3405 
3406 	dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
3407 
3408 	/* reset controller mode to IDLE */
3409 	usbmode = readl(&dev->op_regs->usbmode);
3410 	dev_dbg(&dev->pdev->dev, "usbmode = 0x%08x\n", usbmode);
3411 	usbmode &= (~3 | MODE_IDLE);
3412 	writel(usbmode, &dev->op_regs->usbmode);
3413 
3414 	dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
3415 }
3416 
3417 /*-------------------------------------------------------------------------*/
3418 
3419 static const struct pci_device_id pci_ids[] = { {
3420 	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3421 	.class_mask =	~0,
3422 	.vendor =	0x8086,
3423 	.device =	0x0811,
3424 	.subvendor =	PCI_ANY_ID,
3425 	.subdevice =	PCI_ANY_ID,
3426 }, { /* end: all zeroes */ }
3427 };
3428 
3429 MODULE_DEVICE_TABLE(pci, pci_ids);
3430 
3431 
3432 static struct pci_driver langwell_pci_driver = {
3433 	.name =		(char *) driver_name,
3434 	.id_table =	pci_ids,
3435 
3436 	.probe =	langwell_udc_probe,
3437 	.remove =	langwell_udc_remove,
3438 
3439 	/* device controller suspend/resume */
3440 	.suspend =	langwell_udc_suspend,
3441 	.resume =	langwell_udc_resume,
3442 
3443 	.shutdown =	langwell_udc_shutdown,
3444 };
3445 
3446 
3447 static int __init init(void)
3448 {
3449 	return pci_register_driver(&langwell_pci_driver);
3450 }
3451 module_init(init);
3452 
3453 
3454 static void __exit cleanup(void)
3455 {
3456 	pci_unregister_driver(&langwell_pci_driver);
3457 }
3458 module_exit(cleanup);
3459 
3460 
3461 MODULE_DESCRIPTION(DRIVER_DESC);
3462 MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
3463 MODULE_VERSION(DRIVER_VERSION);
3464 MODULE_LICENSE("GPL");
3465 
3466