/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a high-speed, DMA-capable USB device controller. Besides ep0 it
 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
 * be used as host port) and UOC bits PAD_EN and APU are set (should be done
 * by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
 * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
 * can be used with gadget ether.
 */

/* debug control */
/* #define UDC_VERBOSE */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206 - $Revision: #3 $"

/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"


static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static int udc_probe(struct udc *dev);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
				unsigned long buf_len, gfp_t gfp_flags);
static int udc_remote_wakeup(struct udc *dev);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/* set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving the RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);


/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const char *ep_string[] = {
	ep0_string,
	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
};

/* DMA usage flag */
static bool use_dma = true;
/* packet per buffer dma */
static bool use_dma_ppb = true;
/* with per descr. update */
static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
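
/*
 * Example: loading the module with "modprobe amd5536udc use_dma=0"
 * selects PIO (FIFO) mode, which avoids the 32-bit buffer alignment
 * requirement mentioned in the header comment.
 */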

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma) {
		dev_info(&dev->pdev->dev, "FIFO mode\n");
	}
	DBG(dev, "-------------------------------------------------------\n");
}

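/*
 * Register fields are manipulated below with bit helpers from
 * amd5536udc.h (semantics as used throughout this file): AMD_BIT(bit)
 * builds a single-bit mask, AMD_UNMASK_BIT(bit) is its complement for
 * clearing a bit, AMD_ADDBITS(reg, val, FIELD) inserts val into the
 * FIELD bit range of reg, and AMD_GETBITS(reg, FIELD) extracts it, e.g.:
 *
 *	tmp = readl(&dev->regs->sts);
 *	speed = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
 */
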
/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc	*dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}

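/*
 * Worked example: the UDC_EPIN_BUFF_SIZE field holds the FIFO depth in
 * dwords and ep->txfifo is a dword pointer, so if ep1in..ep3in are each
 * configured with double-buffered 512-byte FIFOs (256 dwords apiece),
 * the FIFO of the next IN endpoint starts at dev->txfifo + 3 * 256.
 */
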
/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}

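/*
 * A CNAK recorded in cnak_pending is presumably retried from the
 * endpoint interrupt paths (outside this excerpt) once the controller
 * allows the NAK bit to be cleared.
 */
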

/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep		*ep;
	struct udc		*dev;
	u32			tmp;
	unsigned long		iflags;
	u8 udc_csr_epix;
	unsigned		maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					  / UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32		tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep.maxpacket = (u16) ~0;
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);
	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep	*ep = NULL;
	unsigned long	iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep	*ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep	*ep;
	struct udc_request	*req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}

		pci_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}

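/*
 * BNA (buffer not available) handling: if the host sends OUT data while
 * no DMA descriptor is ready, the controller flags BNA. The dummy
 * descriptor set up below is marked as last and points to itself, so it
 * serves as a safe placeholder for the descriptor pointer until a real
 * request is queued (see udc_queue() and udc_dequeue()).
 */
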
/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* mark buffer status as DMA DONE so the dummy descriptor
		 * is not used for a transfer */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		writel(*(buf + i), ep->txfifo);
	}

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}

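/*
 * Example: for bytes == 515 the loops above issue 128 dword writes
 * (512 bytes) plus 3 byte writes taken from successive byte lanes of
 * the next dword; the final dummy write to the confirm register then
 * commits the FIFO contents.
 */
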
/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++) {
		*(buf + i) = readl(dev->rxfifo);
	}
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
	}

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}
	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}

	return retval;
}

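/*
 * The BS (buffer status) field implements the CPU/DMA ownership
 * handshake: a descriptor is handed to the engine with HOST READY and
 * held back with HOST BUSY. IN descriptors stay HOST BUSY here until
 * udc_queue() has finished writing them; OUT descriptors are handed
 * over immediately so received data has a place to land.
 */
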
/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (req->dma_mapping) {
		if (ep->in)
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_FROMDEVICE);
		req->dma_mapping = 0;
		req->req.dma = DMA_DONT_USE;
	}

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

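/*
 * Note that complete_req() drops the device lock around the gadget
 * driver's completion callback, so the callback may legally call back
 * into this driver, e.g. to queue the next request.
 */
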
/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	int ret_val = 0;
	struct udc_data_dma	*td;
	struct udc_data_dma	*td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {
		pci_pool_free(dev->data_requests, td,
				(dma_addr_t) td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma	*td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
	}

	return td;
}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma	*td;
	u32 count;

	td = req->td_data;
	/* received number bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in) {
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
	}

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket) {
		len++;
	}

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain) {
				req->td_data->next = dma_addr;
			} else {
				/* req->td_data->next = virt_to_phys(td); */
			}
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain) {
				last->next = dma_addr;
			} else {
				/* last->next = virt_to_phys(td); */
			}
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* remember last desc. of chain */
		req->td_data_last = td;
	}

	return 0;
}

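/*
 * Worked example: a 1500-byte IN request on a 512-byte bulk endpoint
 * needs len = 1500 / 512 + 1 = 3 descriptors. The first one belongs to
 * the request itself, so the loop links two more (allocated from the
 * pci pool on first use, reused afterwards), and the descriptors carry
 * 512, 512 and 476 tx bytes respectively.
 */
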
/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma && usbreq->length != 0
			&& (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
		VDBG(dev, "DMA map req %p\n", req);
		if (ep->in)
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_TODEVICE);
		else
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_FROMDEVICE);
		req->dma_mapping = 1;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable RX DMA while updating the descriptor */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma is not used for OUT endpoints here; this is not
		 * possible for the PPB modes because of chain creation
		 * reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * see the ISR
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;
			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request	*req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* set up poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep was halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

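/*
 * A gadget driver reaches these ops through the generic usb_ep API; a
 * minimal sketch (with "done" standing in for the driver's completion
 * callback):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = done;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */
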
/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc		*dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int amd5536_stop(struct usb_gadget_driver *driver);
/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.start		= amd5536_start,
	.stop		= amd5536_stop,
};

/* Sets up endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed) {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	} else {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	}
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer)) {
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	}
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
		dev->gadget.speed = USB_SPEED_HIGH;
	} else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
		dev->gadget.speed = USB_SPEED_FULL;
	}

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_string[tmp];
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->desc) {
			ep_init(dev->regs, ep);
		}

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
						UDC_FS_EP0OUT_MAX_PKT_SIZE;
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{
	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{
	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}

/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
			empty_req_queue(&dev->ep[tmp]);
		}
	}

	/* disable ep0 */
	ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}

/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long	flags;

	DBG(dev, "Soft reset\n");
	/*
	 * clear possibly pending (waiting) interrupts, because the
	 * interrupt status is lost across a soft reset;
	 * ep interrupt status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device interrupt status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	readl(&dev->regs->cfg);
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
}

/* RDE timer callback to set RDE bit */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * conditionally open the fifo if it was found filled
		 * on the last timer call
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if the fifo is empty, set up polling; do not
			 * just open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		} else {
			/*
			 * fifo contains data now; set up the timer so the
			 * fifo is opened when it expires, to be able to
			 * receive setup packets. When data packets get
			 * queued by the gadget layer, the timer will be
			 * forced to expire with set_rde=0 (RDE is set in
			 * udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);
}

/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* only handle halted endpoints */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receiving of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}

/* Stall timer callback to poll S bit and set it again after */
static void udc_pollstall_timer_function(unsigned long v)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only the single IN and OUT data endpoints are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* set up timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}
1839 
/* Inits endpoint 0 so that SETUP packets are processed */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer)) {
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		}
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode) {
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		} else if (use_dma_ppb_du) {
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		}
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}

/* Make endpoint 0 ready for control traffic */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}

/* Called by gadget driver to register itself */
static int amd5536_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct udc		*dev = udc;
	int			retval;
	u32 tmp;

	if (!driver || !bind || !driver->setup
			|| driver->max_speed < USB_SPEED_HIGH)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;

	retval = bind(&dev->gadget);

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	if (retval) {
		DBG(dev, "binding to %s returning %d\n",
				driver->driver.name, retval);
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}

/* shutdown requests and disconnect from gadget */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	/* empty queues and init hardware */
	udc_basic_init(dev);
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}

/* Called by gadget driver to unregister itself */
static int amd5536_stop(struct usb_gadget_driver *driver)
{
	struct udc	*dev = udc;
	unsigned long	flags;
	u32 tmp;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	/* set SD */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	DBG(dev, "%s: unregistered\n", driver->driver.name);

	return 0;
}

/* Clear pending NAK bits */
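/*
 * Note: endpoints record deferred CNAKs in the cnak_pending bitmask (via
 * UDC_QUEUE_CNAK); the ISRs below call this function to write those CNAKs
 * only once the RX fifo has drained.
 */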
static void udc_process_cnak_queue(struct udc *dev)
{
	u32 tmp;
	u32 reg;

	/* check epin's */
	DBG(dev, "CNAK pending queue processing\n");
	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
		if (cnak_pending & (1 << tmp)) {
			DBG(dev, "CNAK pending for ep%d\n", tmp);
			/* clear NAK by writing CNAK */
			reg = readl(&dev->ep[tmp].regs->ctl);
			reg |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(reg, &dev->ep[tmp].regs->ctl);
			dev->ep[tmp].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
		}
	}
	/* ... and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		reg |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}

/* Enabling RX DMA after setup packet */
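/*
 * Note: RDE opens the shared RX fifo for DMA, so it must not be set while a
 * data OUT endpoint is enabled but has no request queued yet; otherwise OUT
 * data could be fetched before a descriptor is ready. The timer below
 * delays RDE in that case.
 */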
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RXDMA when no data endpoint enabled
		 * or data is queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints too early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer) {
					add_timer(&udc_timer);
				}
			}
		}
	}
}

/* Interrupt handler for data OUT traffic */
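/*
 * Handling order below: BNA (buffer not available, DMA only) and HE (host
 * error) are cleared first; then a completed request is taken from the
 * queue, either by reading the RX fifo (PIO) or by checking the DMA
 * descriptor status, and the next request or the BNA dummy descriptor is
 * programmed.
 */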
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t		ret_val = IRQ_NONE;
	u32			tmp;
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned int		count;
	struct udc_data_dma	*td = NULL;
	unsigned		dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);

		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {
		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {
		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req != NULL) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* number of received bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* number of received bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may already be started by udc_queue()
				 * called by the gadget driver's completion
				 * routine. This happens when the queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets to pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer) {
						add_timer(&udc_timer);
					}
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			 * RX DMA must be reenabled for each desc in PPBDU mode
			 * and must be enabled for PPBNDU mode in case of BNA
			 */
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAK processing only when rxfifo is empty */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			udc_process_cnak_queue(dev);
		}
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}

/* Interrupt handler for data IN traffic */
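/*
 * Note: when TDC (transfer done completion) is handled, the IN bit is
 * deliberately not handled in the same pass; handling both for the same
 * event appears to trip a UDC defect (see the comment at the IN check
 * below).
 */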
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned dma_done;
	unsigned len;

	ep = &dev->ep[ep_ix];

	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(&dev->pdev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev,
			"HE ep%din occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set - completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/*
			 * length bytes transferred
			 * check dma done of last desc. in PPBDU mode
			 */
			if (use_dma_ppb_du) {
				td = udc_get_last_dma_desc(req);
				if (td) {
					dma_done =
						AMD_GETBITS(td->status,
						UDC_DMA_IN_STS_BS);
					/* don't care about DMA done */
					req->req.actual = req->req.length;
				}
			} else {
				/* assume all bytes transferred */
				req->req.actual = req->req.length;
			}

			if (req->req.actual == req->req.length) {
				/* complete req */
				complete_req(ep, req, 0);
				req->dma_going = 0;
				/* further request available ? */
				if (list_empty(&ep->queue)) {
					/* disable interrupt */
					tmp = readl(&dev->regs->ep_irqmsk);
					tmp |= AMD_BIT(ep->num);
					writel(tmp, &dev->regs->ep_irqmsk);
				}
			}
		}
		ep->cancel_transfer = 0;

	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled (UDC defect) ?)
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {
					req->dma_going = 1;

					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}

		} else if (!use_dma && ep->in) {
			/* disable interrupt */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp |= AMD_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;
}

/* Interrupt handler for Control OUT traffic */
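/*
 * Distinguishes two cases from the ep0 OUT status: a SETUP packet (fetched
 * from the DMA setup descriptor or the RX fifo and forwarded to the gadget
 * driver's setup() callback) and a 0-byte DATA packet (status stage of a
 * control transfer).
 */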
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int setup_supported;
	u32 count;
	int set = 0;
	struct udc_ep	*ep;
	struct udc_ep	*ep_tmp;

	ep = &dev->ep[UDC_EP0OUT_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* type of data: SETUP or DATA 0 bytes */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);

	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;

		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;

		/* set NAK for EP0_IN */
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);

			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
					UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}

		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}

			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data too early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer.expires = jiffies +
							HZ/UDC_RDE_TIMER_DIV;
				if (!stop_timer) {
					add_timer(&udc_timer);
				}
			}
		}

		/*
		 * mass storage reset must be processed here because
		 * next packet may be a CLEAR_FEATURE HALT which would not
		 * clear the stall bit when no STALL handshake was received
		 * before (autostall can cause this)
		 */
		if (setup_data.data[0] == UDC_MSCRES_DWORD0
				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits
			 * only one IN and one OUT endpoint are handled
			 */
			ep_tmp = &udc->ep[UDC_EPIN_IX];
			udc_set_halt(&ep_tmp->ep, 0);
			ep_tmp = &udc->ep[UDC_EPOUT_IX];
			udc_set_halt(&ep_tmp->ep, 0);
		}

		/* call gadget with setup data received */
		spin_unlock(&dev->lock);
		setup_supported = dev->driver->setup(&dev->gadget,
						&setup_data.request);
		spin_lock(&dev->lock);

		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		/* ep0 in returns data (not zlp) on IN phase */
		if (setup_supported >= 0 && setup_supported <
				UDC_EP0IN_MAXPACKET) {
			/* clear NAK by writing CNAK in EP0_IN */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
			dev->ep[UDC_EP0IN_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

		/* if unsupported request then stall */
		} else if (setup_supported < 0) {
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		} else
			dev->waiting_zlp_ack_ep0in = 1;

		/* clear NAK by writing CNAK in EP0_OUT */
		if (!set) {
			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
			dev->ep[UDC_EP0OUT_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
		}

		if (!use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
		}

	/* data packet of 0 bytes */
	} else if (tmp == UDC_EPSTS_OUT_DATA) {
		/* clear OUT bits in ep status */
		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);

		/* get setup data: only 0-byte packet */
		if (use_dma) {
			/* no req if 0 packet, just reactivate */
			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
				VDBG(dev, "ZLP\n");

				/* set HOST READY */
				dev->ep[UDC_EP0OUT_IX].td->status =
					AMD_ADDBITS(
					dev->ep[UDC_EP0OUT_IX].td->status,
					UDC_DMA_OUT_STS_BS_HOST_READY,
					UDC_DMA_OUT_STS_BS);
				/* enable RDE */
				udc_ep0_set_rde(dev);
				ret_val = IRQ_HANDLED;

			} else {
				/* control write */
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
				/* re-program desc. pointer for possible ZLPs */
				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				/* enable RDE */
				udc_ep0_set_rde(dev);
			}
		} else {
			/* number of received bytes */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/* out data for fifo mode not working */
			count = 0;

			/* 0 packet or real data ? */
			if (count != 0) {
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
			} else {
				/* dummy read confirm */
				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
				ret_val = IRQ_HANDLED;
			}
		}
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAK processing only when rxfifo is empty */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			udc_process_cnak_queue(dev);
		}
	}

finished:
	return ret_val;
}

/* Interrupt handler for Control IN traffic */
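/*
 * TDC signals DMA completion of the IN data; the IN bit triggers
 * (re)filling: either the next request's descriptor is armed and poll
 * demand is set (DMA), or the TX fifo is written directly (PIO).
 */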
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned len;

	ep = &dev->ep[UDC_EP0IN_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* DMA completion */
	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "isr: TDC clear\n");
		ret_val = IRQ_HANDLED;

		/* clear TDC bit */
		writel(AMD_BIT(UDC_EPSTS_TDC),
				&dev->ep[UDC_EP0IN_IX].regs->sts);

	/* status reg has IN bit set ? */
	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
		ret_val = IRQ_HANDLED;

		if (ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
		if (dev->stall_ep0in) {
			DBG(dev, "stall ep0in\n");
			/* halt ep0in */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		} else {
			if (!list_empty(&ep->queue)) {
				/* next request */
				req = list_entry(ep->queue.next,
						struct udc_request, queue);

				if (ep->dma) {
					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);
					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_STP_STS_BS_HOST_READY,
						UDC_DMA_STP_STS_BS);

					/* set poll demand bit */
					tmp =
					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp,
					&dev->ep[UDC_EP0IN_IX].regs->ctl);

					/* all bytes will be transferred */
					req->req.actual = req->req.length;

					/* complete req */
					complete_req(ep, req, 0);

				} else {
					/* write fifo */
					udc_txfifo_write(ep, &req->req);

					/* length bytes transferred */
					len = req->req.length - req->req.actual;
					if (len > ep->ep.maxpacket)
						len = ep->ep.maxpacket;

					req->req.actual += len;
					if (req->req.actual == req->req.length
						|| (len != ep->ep.maxpacket)) {
						/* complete req */
						complete_req(ep, req, 0);
					}
				}

			}
		}
		ep->halted = 0;
		dev->stall_ep0in = 0;
		if (!ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
	}

	return ret_val;
}

/* Interrupt handler for global device events */
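/*
 * Handles SET_CONFIG (SC), SET_INTERFACE (SI), USB reset (UR), suspend (US),
 * speed enumeration done (ENUM) and session valid change (SVC, used for
 * disconnect detection). SC and SI, which the hardware has already
 * processed itself, are turned into synthetic setup requests so the gadget
 * driver still sees them.
 */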
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 cfg;
	struct udc_ep *ep;
	u16 i;
	u8 udc_csr_epix;

	/* SET_CONFIG irq ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
		ret_val = IRQ_HANDLED;

		/* read config value */
		tmp = readl(&dev->regs->sts);
		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
		dev->cur_config = cfg;
		dev->set_cfg_not_acked = 1;

		/* make usb request for gadget driver */
		memset(&setup_data, 0, sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
		setup_data.request.wValue = cpu_to_le16(dev->cur_config);

		/* program the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep cfg */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
						UDC_CSR_NE_CFG);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}
		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* SET_INTERFACE ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
		ret_val = IRQ_HANDLED;

		dev->set_cfg_not_acked = 1;
		/* read interface and alt setting values */
		tmp = readl(&dev->regs->sts);
		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

		/* make usb request for gadget driver */
		memset(&setup_data, 0, sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);

		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
				dev->cur_alt, dev->cur_intf);

		/* program the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			/* UDC CSR reg */
			/* set ep values */
			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep interface */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
						UDC_CSR_NE_INTF);
			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
			/* ep alt */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
						UDC_CSR_NE_ALT);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}

		/* call gadget zero with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* USB reset */
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask not needed interrupts */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	} /* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	} /* new speed ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* link up all endpoints */
		udc_setup_endpoints(dev);
		dev_info(&dev->pdev->dev, "Connect: %s\n",
			 usb_speed_string(dev->gadget.speed));

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* check that session is not valid to detect disconnect */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}
	}

	return ret_val;
}

/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
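/*
 * Dispatch: ep0 IN/OUT interrupts go to the dedicated control ISRs; the
 * remaining endpoint bits map to udc_data_in_isr() for indices up to
 * UDC_EPIN_NUM and to udc_data_out_isr() above that; device-global events
 * are handled last by udc_dev_isr().
 */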
static irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
	if (reg) {
		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
			ret_val |= udc_control_out_isr(dev);
		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
			ret_val |= udc_control_in_isr(dev);

		/*
		 * data endpoint
		 * iterate ep's
		 */
		for (i = 1; i < UDC_EP_NUM; i++) {
			ep_irq = 1 << i;
			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
				continue;

			/* clear irq status */
			writel(ep_irq, &dev->regs->ep_irqsts);

			/* irq for out ep ? */
			if (i > UDC_EPIN_NUM)
				ret_val |= udc_data_out_isr(dev, i);
			else
				ret_val |= udc_data_in_isr(dev, i);
		}
	}

	/* check for dev irq */
	reg = readl(&dev->regs->irqsts);
	if (reg) {
		/* clear irq */
		writel(reg, &dev->regs->irqsts);
		ret_val |= udc_dev_isr(dev, reg);
	}

	spin_unlock(&dev->lock);
	return ret_val;
}

/* Tears down device */
static void gadget_release(struct device *pdev)
{
	struct amd5536udc *dev = dev_get_drvdata(pdev);
	kfree(dev);
}

/* Cleanup on device remove */
static void udc_remove(struct udc *dev)
{
	/* remove timer */
	stop_timer++;
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	if (udc_timer.data)
		del_timer_sync(&udc_timer);
	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	if (udc_pollstall_timer.data)
		del_timer_sync(&udc_pollstall_timer);
	udc = NULL;
}

/* Reset all pci context */
static void udc_pci_remove(struct pci_dev *pdev)
{
	struct udc		*dev;

	dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);
	/* gadget driver must not be registered */
	BUG_ON(dev->driver != NULL);

	/* dma pool cleanup */
	if (dev->data_requests)
		pci_pool_destroy(dev->data_requests);

	if (dev->stp_requests) {
		/* cleanup DMA desc's for ep0out */
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td_stp,
			dev->ep[UDC_EP0OUT_IX].td_stp_dma);
		pci_pool_free(dev->stp_requests,
			dev->ep[UDC_EP0OUT_IX].td,
			dev->ep[UDC_EP0OUT_IX].td_phys);

		pci_pool_destroy(dev->stp_requests);
	}

	/* reset controller */
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	if (dev->irq_registered)
		free_irq(pdev->irq, dev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->mem_region)
		release_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (dev->active)
		pci_disable_device(pdev);

	device_unregister(&dev->gadget.dev);
	pci_set_drvdata(pdev, NULL);

	udc_remove(dev);
}

/* create dma pools on init */
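/*
 * Note on the module parameters resolved below: packet-per-buffer mode
 * (use_dma_ppb) overrides buffer-fill mode, and "descriptor update"
 * (use_dma_ppb_du) is only meaningful on top of packet-per-buffer mode.
 */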
static int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma	*td_stp;
	struct udc_data_dma	*td_data;
	int retval;

	/* consistent DMA mode setting ? */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", NULL,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		retval = -ENOMEM;
		goto finished;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", NULL,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto finished;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (td_stp == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* data: 0 packets !? */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (td_data == NULL) {
		retval = -ENOMEM;
		goto finished;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

finished:
	return retval;
}

/* Called by pci bus driver to init pci context */
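/*
 * Note: the driver supports a single controller instance; the static udc
 * pointer guards against a second probe.
 */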
static int udc_pci_probe(
	struct pci_dev *pdev,
	const struct pci_device_id *id
)
{
	struct udc		*dev;
	unsigned long		resource;
	unsigned long		len;
	int			retval = 0;

	/* one udc only */
	if (udc) {
		dev_dbg(&pdev->dev, "already probed\n");
		return -EBUSY;
	}

	/* init */
	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto finished;
	}

	/* pci setup */
	if (pci_enable_device(pdev) < 0) {
		kfree(dev);
		dev = NULL;
		retval = -ENODEV;
		goto finished;
	}
	dev->active = 1;

	/* PCI resource allocation */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	if (!request_mem_region(resource, len, name)) {
		dev_dbg(&pdev->dev, "pci device used already\n");
		kfree(dev);
		dev = NULL;
		retval = -EBUSY;
		goto finished;
	}
	dev->mem_region = 1;

	dev->virt_addr = ioremap_nocache(resource, len);
	if (dev->virt_addr == NULL) {
		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
		kfree(dev);
		dev = NULL;
		retval = -EFAULT;
		goto finished;
	}

	if (!pdev->irq) {
		dev_err(&dev->pdev->dev, "irq not set\n");
		kfree(dev);
		dev = NULL;
		retval = -ENODEV;
		goto finished;
	}

	spin_lock_init(&dev->lock);
	/* udc csr registers base */
	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
	/* dev registers base */
	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
	/* ep registers base */
	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
	/* fifo's base */
	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);

	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
		dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
		kfree(dev);
		dev = NULL;
		retval = -EBUSY;
		goto finished;
	}
	dev->irq_registered = 1;

	pci_set_drvdata(pdev, dev);

	/* chip revision for Hs AMD5536 */
	dev->chiprev = pdev->revision;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* init dma pools */
	if (use_dma) {
		retval = init_dma_pools(dev);
		if (retval != 0)
			goto finished;
	}

	dev->phys_addr = resource;
	dev->irq = pdev->irq;
	dev->pdev = pdev;
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;

	/* general probing */
	if (udc_probe(dev) == 0)
		return 0;

finished:
	if (dev)
		udc_pci_remove(pdev);
	return retval;
}

/* general probe */
static int udc_probe(struct udc *dev)
{
	char		tmp[128];
	u32		reg;
	int		retval;

	/* mark timer as not initialized */
	udc_timer.data = 0;
	udc_pollstall_timer.data = 0;

	/* device struct setup */
	dev->gadget.ops = &udc_ops;

	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = name;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* init registers, interrupts, ... */
	startup_registers(dev);

	dev_info(&dev->pdev->dev, "%s\n", mod_desc);

	snprintf(tmp, sizeof(tmp), "%d", dev->irq);
	dev_info(&dev->pdev->dev,
		"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
		tmp, dev->phys_addr, dev->chiprev,
		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
	if (dev->chiprev == UDC_HSA0_REV) {
		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
		retval = -ENODEV;
		goto finished;
	}
	dev_info(&dev->pdev->dev,
		"driver version: %s(for Geode5536 B1)\n", tmp);
	udc = dev;

	retval = usb_add_gadget_udc(&udc->pdev->dev, &dev->gadget);
	if (retval)
		goto finished;

	retval = device_register(&dev->gadget.dev);
	if (retval) {
		usb_del_gadget_udc(&dev->gadget);
		put_device(&dev->gadget.dev);
		goto finished;
	}

	/* timer init */
	init_timer(&udc_timer);
	udc_timer.function = udc_timer_function;
	udc_timer.data = 1;
	/* timer pollstall init */
	init_timer(&udc_pollstall_timer);
	udc_pollstall_timer.function = udc_pollstall_timer_function;
	udc_pollstall_timer.data = 1;

	/* set SD */
	reg = readl(&dev->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SD);
	writel(reg, &dev->regs->ctl);

	/* print dev register info */
	print_regs(dev);

	return 0;

finished:
	return retval;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* PCI device parameters */
static const struct pci_device_id pci_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
		.class =	(PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask =	0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);

/* PCI functions */
static struct pci_driver udc_pci_driver = {
	.name =		(char *) name,
	.id_table =	pci_id,
	.probe =	udc_pci_probe,
	.remove =	udc_pci_remove,
};

/* Inits driver */
static int __init init(void)
{
	return pci_register_driver(&udc_pci_driver);
}
module_init(init);

/* Cleans driver */
static void __exit cleanup(void)
{
	pci_unregister_driver(&udc_pci_driver);
}
module_exit(cleanup);

MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");