// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string_choices.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"
#include "musb_trace.h"


/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
		(req->map_state != UN_MAPPED))

/* Maps the buffer to dma */

static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dma_addr_t dma_addr;
		int ret;

		dma_addr = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		ret = dma_mapping_error(musb->controller, dma_addr);
		if (ret)
			return;

		request->request.dma = dma_addr;
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from dma and map it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	struct musb_ep *musb_ep = request->ep;

	if (!is_buffer_mapped(request) || !musb_ep->dma)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
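
/*
 * Quick summary of the per-request map_state values used above:
 *
 *   UN_MAPPED   - buffer is owned by the CPU; the I/O paths fall back to PIO
 *   MUSB_MAPPED - this driver called dma_map_single() itself, so it must
 *                 also unmap the buffer when the request completes
 *   PRE_MAPPED  - the gadget driver supplied an already valid request->dma,
 *                 so only cache syncs are done here
 */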

/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);

	if (!dma_mapping_error(&musb->g.dev, request->dma))
		unmap_dma_buffer(req, musb);

	trace_musb_req_gb(req);
	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * Caller locked controller and blocked irqs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		musb_dbg(musb, "ep:%s disabled - ignore request",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
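	/*
	 * Note: a still-busy channel normally means an earlier transfer has
	 * not finished yet; returning here is safe because the DMA
	 * completion path is expected to re-enter musb_g_tx(), and thus
	 * txstate(), once the channel is done.
	 */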
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "dma pending...");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		musb_dbg(musb, "%s old packet still ready, txcsr %03x",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		musb_dbg(musb, "%s stalling, txcsr %03x",
				musb_ep->end_point.name, csr);
		return;
	}

	musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					/*
					 * Enable Autoset according to table
					 * below
					 * bulk_split hb_mult	Autoset_Enable
					 *	0	0	Yes(Normal)
					 *	0	>0	No(High BW ISO)
					 *	1	0	Yes(HS bulk)
					 *	1	>0	Yes(FS bulk)
					 */
					if (!musb_ep->hb_mult ||
					    can_bulk_split(musb,
							   musb_ep->type))
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

		if (is_cppi_enabled(musb)) {
			/* program endpoint CSR first, then setup DMA */
			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
			csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
				MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
						~MUSB_TXCSR_P_UNDERRUN) | csr);

			/* ensure writebuffer is empty */
			csr = musb_readw(epio, MUSB_TXCSR);

			/*
			 * NOTE host side sets DMAENAB later than this; both are
			 * OK since the transfer dma glue (between CPPI and
			 * Mentor fifos) just tells CPPI it could start. Data
			 * only moves to the USB TX fifo when both fifos are
			 * ready.
			 */
			/*
			 * "mode" is irrelevant here; handle terminating ZLPs
			 * like PIO does, since the hardware RNDIS mode seems
			 * unreliable except for the
			 * last-packet-is-already-short case.
			 */
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					0,
					request->dma + request->actual,
					request_size);
			if (!use_dma) {
				c->channel_release(musb_ep->dma);
				musb_ep->dma = NULL;
				csr &= ~MUSB_TXCSR_DMAENAB;
				musb_writew(epio, MUSB_TXCSR, csr);
				/* invariant: request->buf is non-null */
			}
		} else if (tusb_dma_omap(musb))
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					request->zero,
					request->dma + request->actual,
					request_size);
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
		return;
	}

	if (req) {

		trace_musb_req_tx(req);

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length)
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length)) {

			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and re-acquired after some time. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				musb_dbg(musb, "%s idle now",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		musb_dbg(musb, "ep:%s disabled - ignore request",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "DMA pending...");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		musb_dbg(musb, "%s stalling, RXCSR %04x",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE: CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */

		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
			if (!is_buffer_mapped(req))
				goto buffer_aint_mapped;

			if (musb_dma_inventra(musb)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				unsigned int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work. But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, end of transfer is signified either by a
	 * short packet, or filling the last byte of the buffer. (Sending
	 * extra data in that last packet should trigger an overflow fault.)
	 * But in mode 1, we don't get DMA completion interrupt for short
	 * packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min(request->length - request->actual,
							(unsigned)fifo_count);
					musb_ep->dma->desired_mode = 0;
				}

				use_dma = c->channel_program(
						channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size);

				if (use_dma)
					return;
			}

			if ((musb_dma_ux500(musb)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				unsigned int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (fifo_count < musb_ep->packet_sz)
					transfer_size = fifo_count;
				else if (request->short_not_ok)
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							(unsigned)fifo_count);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

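				/*
				 * Transfers that fit in a single packet stay
				 * in DMA mode 0; larger ones switch the
				 * channel to mode 1, where packets are
				 * strung together without per-packet
				 * endpoint interrupts.
				 */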
				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}

			len = request->length - request->actual;
			musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
					musb_ep->end_point.name,
					fifo_count, len,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

			if (tusb_dma_omap(musb)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}

			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			unmap_dma_buffer(req, musb);

			/*
			 * Clear DMAENAB and AUTOCLEAR for the
			 * PIO mode transfer
			 */
			csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
			musb_writew(epio, MUSB_RXCSR, csr);

buffer_aint_mapped:
			fifo_count = min_t(unsigned int,
					request->length - request->actual,
					(unsigned int)fifo_count);
			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length ||
	    fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

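/*
 * Note on double-buffered RX endpoints: when a DMA transfer completes and
 * another packet is already waiting in the second FIFO buffer, musb_g_rx()
 * below jumps to its "exit" label so rxstate() can drain that packet right
 * away instead of waiting for one more endpoint interrupt.
 */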
/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	trace_musb_req_rx(req);
	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		musb_dbg(musb, "%s busy, csr %04x",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and re-acquired after some time. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem		*mbase;
	u8			epnum;
	u16			csr;
	unsigned		tmp;
	int			status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp_mult(desc) - 1;
	if (tmp) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			musb_dbg(musb, "no support for high bandwidth ISO");
			goto fail;
		}
		musb_ep->hb_mult = tmp;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = usb_endpoint_maxp(desc);
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			musb_dbg(musb, "packet size beyond hardware FIFO size");
			goto fail;
		}

		musb->intrtxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (can_bulk_split(musb, musb_ep->type))
			musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
						musb_ep->packet_sz) - 1;
		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
				| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			musb_dbg(musb, "packet size beyond hardware FIFO size");
			goto fail;
		}

		musb->intrrxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
				| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			musb_ep_xfertype_string(musb_ep->type),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_delayed_work(&musb->irq_work, 0);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	schedule_delayed_work(&musb->irq_work, 0);

	spin_unlock_irqrestore(&(musb->lock), flags);

	musb_dbg(musb, "%s", musb_ep->end_point.name);

	return 0;
}

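/*
 * Note: request->request.dma is initialised to DMA_ADDR_INVALID below.
 * map_dma_buffer() uses that marker to tell whether the gadget driver
 * pre-mapped the buffer itself (valid dma address, only a cache sync is
 * needed) or left the mapping to this driver (DMA_ADDR_INVALID, so
 * dma_map_single() is called here).
 */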
/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*request;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request)
		return NULL;

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	trace_musb_req_alloc(request);
	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct musb_request *request = to_musb_request(req);

	trace_musb_req_free(request);
	kfree(request);
}

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	u16		csr;
	void __iomem	*epio = req->ep->hw_ep->regs;

	trace_musb_req_start(req);
	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx) {
		txstate(musb, req);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}
}

static int musb_ep_restart_resume_work(struct musb *musb, void *data)
{
	struct musb_request *req = data;

	musb_ep_restart(musb, req);

	return 0;
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	status = pm_runtime_get(musb->controller);
	if ((status != -EINPROGRESS) && status < 0) {
		dev_err(musb->controller,
			"pm runtime get failed in %s\n",
			__func__);
		pm_runtime_put_noidle(musb->controller);

		return status;
	}
	status = 0;

	trace_musb_req_enq(request);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		musb_dbg(musb, "req %p queued to %s while ep %s",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		unmap_dma_buffer(request, musb);
		goto unlock;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
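	/*
	 * Requests queued behind the head are not started here; they are
	 * picked up by musb_g_tx()/musb_g_rx() when the preceding request
	 * completes.
	 */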
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
		status = musb_queue_resume_work(musb,
						musb_ep_restart_resume_work,
						request);
		if (status < 0) {
			dev_err(musb->controller, "%s resume work: %i\n",
				__func__, status);
			list_del(&request->list);
		}
	}

unlock:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return status;
}

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || req->ep != musb_ep)
		return -EINVAL;

	trace_musb_req_deq(req);

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_err(musb->controller, "request %p not queued to %s\n",
				request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
 * data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		if (request) {
			musb_dbg(musb, "request in progress, cannot halt %s",
					ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				musb_dbg(musb, "FIFO busy, cannot halt %s",
						ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		musb_dbg(musb, "restarting the request");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Sets the halt feature with the clear requests ignored
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}

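/*
 * Note: fifo_status is only implemented for OUT endpoints; it reports the
 * byte count latched in RXCOUNT, which (per the FIXME below) is strictly
 * meaningful only while RXPKTRDY is set.
 */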
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading,
			 * but not flush the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/* ----------------------------------------------------------------------- */

static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

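/*
 * Remote wakeup: from B_PERIPHERAL, resume signalling is driven by setting
 * MUSB_POWER_RESUME and clearing it again a couple of milliseconds later
 * (USB 2.0 expects the device to drive resume for roughly 1-15 ms). As the
 * FIXME below notes, the delay should really live in a timer callback
 * rather than an mdelay() under the spinlock.
 */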
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb_get_state(musb)) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		if (musb->xceiv) {
			spin_unlock_irqrestore(&musb->lock, flags);
			otg_start_srp(musb->xceiv->otg);
			spin_lock_irqsave(&musb->lock, flags);
		}

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		musb_dbg(musb, "Unhandled wake: %s",
			 musb_otg_state_string(musb));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	musb_dbg(musb, "issue wakeup");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	gadget->is_selfpowered = !!is_selfpowered;
	return 0;
}

static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	musb_dbg(musb, "gadget D+ pullup %s",
		str_on_off(is_on));
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	musb_dbg(musb, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return usb_phy_set_power(musb->xceiv, mA);
}

static void musb_gadget_work(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;

	musb = container_of(work, struct musb, gadget_work.work);
	pm_runtime_get_sync(musb->controller);
	spin_lock_irqsave(&musb->lock, flags);
	musb_pullup(musb, musb->softconnect);
	spin_unlock_irqrestore(&musb->lock, flags);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
}

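/*
 * The actual SOFTCONN toggle is deferred to musb_gadget_work() above:
 * pm_runtime_get_sync() may sleep, so pullup() only records the new
 * softconnect state under the spinlock and schedules the work item.
 */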
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		schedule_delayed_work(&musb->gadget_work, 0);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	return 0;
}

static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g);

static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session		= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
};

/* ----------------------------------------------------------------------- */

/* Registration */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port. It assumes
 * all peripheral ports are external...
 */

static void
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		usb_ep_set_maxpacket_limit(&ep->end_point, 64);
		ep->end_point.caps.type_control = true;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
		else
			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
		ep->end_point.caps.type_iso = true;
		ep->end_point.caps.type_bulk = true;
		ep->end_point.caps.type_int = true;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}

	if (!epnum || hw_ep->is_shared_fifo) {
		ep->end_point.caps.dir_in = true;
		ep->end_point.caps.dir_out = true;
	} else if (is_in)
		ep->end_point.caps.dir_in = true;
	else
		ep->end_point.caps.dir_out = true;
}

/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
			}
		}
	}
}

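/*
 * Resulting endpoint naming: ep0 and shared-FIFO endpoints are plain
 * "ep<N>" and advertise both directions, while split-FIFO endpoints are
 * registered as "ep<N>in" / "ep<N>out" with a single direction each.
 */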
/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race: if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	MUSB_DEV_MODE(musb);
	musb_set_state(musb, OTG_STATE_B_IDLE);

	/* this "gadget" abstracts/virtualizes the controller */
	musb->g.name = musb_driver_name;
	/* don't support otg protocols */
	musb->g.is_otg = 0;
	INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;

	return 0;
err:
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}

void musb_gadget_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_HOST)
		return;

	cancel_delayed_work_sync(&musb->gadget_work);
	usb_del_gadget_udc(&musb->g);
}

/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb		*musb = gadget_to_musb(g);
	unsigned long		flags;
	int			retval = 0;

	if (driver->max_speed < USB_SPEED_HIGH) {
		retval = -EINVAL;
		goto err;
	}

	pm_runtime_get_sync(musb->controller);

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

	if (musb->xceiv)
		otg_set_peripheral(musb->xceiv->otg, &musb->g);
	else
		phy_set_mode(musb->phy, PHY_MODE_USB_DEVICE);

	musb_set_state(musb, OTG_STATE_B_IDLE);
	spin_unlock_irqrestore(&musb->lock, flags);

	musb_start(musb);

	/* REVISIT: funcall to other code, which also
	 * handles power budgeting ... this way also
	 * ensures HdrcStart is indirectly called.
	 */
	if (musb->xceiv && musb->xceiv->last_event == USB_EVENT_ID)
		musb_platform_set_vbus(musb, 1);

	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return 0;

err:
	return retval;
}

/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
static int musb_gadget_stop(struct usb_gadget *g)
{
	struct musb	*musb = gadget_to_musb(g);
	unsigned long	flags;

	pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */
1890 */ 1891 1892 spin_lock_irqsave(&musb->lock, flags); 1893 1894 musb_hnp_stop(musb); 1895 1896 (void) musb_gadget_vbus_draw(&musb->g, 0); 1897 1898 musb_set_state(musb, OTG_STATE_UNDEFINED); 1899 musb_stop(musb); 1900 1901 if (musb->xceiv) 1902 otg_set_peripheral(musb->xceiv->otg, NULL); 1903 else 1904 phy_set_mode(musb->phy, PHY_MODE_INVALID); 1905 1906 musb->is_active = 0; 1907 musb->gadget_driver = NULL; 1908 musb_platform_try_idle(musb, 0); 1909 spin_unlock_irqrestore(&musb->lock, flags); 1910 1911 /* 1912 * FIXME we need to be able to register another 1913 * gadget driver here and have everything work; 1914 * that currently misbehaves. 1915 */ 1916 1917 /* Force check of devctl register for PM runtime */ 1918 pm_runtime_mark_last_busy(musb->controller); 1919 pm_runtime_put_autosuspend(musb->controller); 1920 1921 return 0; 1922 } 1923 1924 /* ----------------------------------------------------------------------- */ 1925 1926 /* lifecycle operations called through plat_uds.c */ 1927 1928 void musb_g_resume(struct musb *musb) 1929 { 1930 musb->is_suspended = 0; 1931 switch (musb_get_state(musb)) { 1932 case OTG_STATE_B_IDLE: 1933 break; 1934 case OTG_STATE_B_WAIT_ACON: 1935 case OTG_STATE_B_PERIPHERAL: 1936 musb->is_active = 1; 1937 if (musb->gadget_driver && musb->gadget_driver->resume) { 1938 spin_unlock(&musb->lock); 1939 musb->gadget_driver->resume(&musb->g); 1940 spin_lock(&musb->lock); 1941 } 1942 break; 1943 default: 1944 WARNING("unhandled RESUME transition (%s)\n", 1945 musb_otg_state_string(musb)); 1946 } 1947 } 1948 1949 /* called when SOF packets stop for 3+ msec */ 1950 void musb_g_suspend(struct musb *musb) 1951 { 1952 u8 devctl; 1953 1954 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 1955 musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl); 1956 1957 switch (musb_get_state(musb)) { 1958 case OTG_STATE_B_IDLE: 1959 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 1960 musb_set_state(musb, OTG_STATE_B_PERIPHERAL); 1961 break; 1962 case OTG_STATE_B_PERIPHERAL: 1963 musb->is_suspended = 1; 1964 if (musb->gadget_driver && musb->gadget_driver->suspend) { 1965 spin_unlock(&musb->lock); 1966 musb->gadget_driver->suspend(&musb->g); 1967 spin_lock(&musb->lock); 1968 } 1969 break; 1970 default: 1971 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; 1972 * A_PERIPHERAL may need care too 1973 */ 1974 WARNING("unhandled SUSPEND transition (%s)", 1975 musb_otg_state_string(musb)); 1976 } 1977 } 1978 1979 /* Called during SRP */ 1980 void musb_g_wakeup(struct musb *musb) 1981 { 1982 musb_gadget_wakeup(&musb->g); 1983 } 1984 1985 /* called when VBUS drops below session threshold, and in other cases */ 1986 void musb_g_disconnect(struct musb *musb) 1987 { 1988 void __iomem *mregs = musb->mregs; 1989 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 1990 1991 musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl); 1992 1993 /* clear HR */ 1994 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); 1995 1996 /* don't draw vbus until new b-default session */ 1997 (void) musb_gadget_vbus_draw(&musb->g, 0); 1998 1999 musb->g.speed = USB_SPEED_UNKNOWN; 2000 if (musb->gadget_driver && musb->gadget_driver->disconnect) { 2001 spin_unlock(&musb->lock); 2002 musb->gadget_driver->disconnect(&musb->g); 2003 spin_lock(&musb->lock); 2004 } 2005 2006 switch (musb_get_state(musb)) { 2007 default: 2008 musb_dbg(musb, "Unhandled disconnect %s, setting a_idle", 2009 musb_otg_state_string(musb)); 2010 musb_set_state(musb, OTG_STATE_A_IDLE); 2011 MUSB_HST_MODE(musb); 2012 break; 2013 case 
		musb_set_state(musb, OTG_STATE_A_WAIT_BCON);
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb_set_state(musb, OTG_STATE_B_IDLE);
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}

void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

	musb_dbg(musb, "<== %s driver '%s'",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report reset, if we didn't already (flushing EP state) */
	if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&musb->lock);
		usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
		spin_lock(&musb->lock);
	}

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);


	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;
	musb->g.quirk_zlp_not_supp = 1;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (!musb->g.is_otg) {
		/* USB device controllers that are not OTG compatible
		 * may not have DEVCTL register in silicon.
		 * In that case, do not rely on devctl for setting
		 * peripheral mode.
		 */
		musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
		musb->g.is_a_peripheral = 0;
	} else if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
		musb->g.is_a_peripheral = 0;
	} else {
		musb_set_state(musb, OTG_STATE_A_PERIPHERAL);
		musb->g.is_a_peripheral = 1;
	}

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g, 8);
}