// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence CDNSP DRD Driver.
 *
 * Copyright (C) 2020 Cadence.
 *
 * Author: Pawel Laszczak <pawell@cadence.com>
 *
 */

#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/dmi.h>

#include "core.h"
#include "gadget-export.h"
#include "drd.h"
#include "cdnsp-gadget.h"
#include "cdnsp-trace.h"

unsigned int cdnsp_port_speed(unsigned int port_status)
{
	/* Detect gadget speed based on PORTSC register. */
	if (DEV_SUPERSPEEDPLUS(port_status))
		return USB_SPEED_SUPER_PLUS;
	else if (DEV_SUPERSPEED(port_status))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(port_status))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(port_status))
		return USB_SPEED_FULL;

	/* If device is detached then speed will be USB_SPEED_UNKNOWN. */
	return USB_SPEED_UNKNOWN;
}

/*
 * Given a port state, this function returns a value that would result in the
 * port being in the same state, if the value was written to the port status
 * control register.
 * Save Read Only (RO) bits and save read/write bits where
 * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
 */
u32 cdnsp_port_state_to_neutral(u32 state)
{
	/* Save read-only status and port state. */
	return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
}

/**
 * cdnsp_find_next_ext_cap - Find the offset of the extended capabilities
 *                           with capability ID id.
 * @base: PCI MMIO registers base address.
 * @start: Address at which to start looking, (0 or HCC_PARAMS to start at
 *         beginning of list)
 * @id: Extended capability ID to search for.
 *
 * Returns the offset of the next matching extended capability structure.
 * Some capabilities can occur several times,
 * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
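 *
 * As in xHCI, the low byte of each extended capability register holds the
 * capability ID and the next byte holds the offset (in 32-bit words) to the
 * next capability; a next-offset of zero terminates the list.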
 */
int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
{
	u32 offset = start;
	u32 next;
	u32 val;

	if (!start || start == HCC_PARAMS_OFFSET) {
		val = readl(base + HCC_PARAMS_OFFSET);
		if (val == ~0)
			return 0;

		offset = HCC_EXT_CAPS(val) << 2;
		if (!offset)
			return 0;
	}

	do {
		val = readl(base + offset);
		if (val == ~0)
			return 0;

		if (EXT_CAPS_ID(val) == id && offset != start)
			return offset;

		next = EXT_CAPS_NEXT(val);
		offset += next << 2;
	} while (next);

	return 0;
}

void cdnsp_set_link_state(struct cdnsp_device *pdev,
			  __le32 __iomem *port_regs,
			  u32 link_state)
{
	int port_num = 0xFF;
	u32 temp;

	temp = readl(port_regs);
	temp = cdnsp_port_state_to_neutral(temp);
	temp |= PORT_WKCONN_E | PORT_WKDISC_E;
	writel(temp, port_regs);

	temp &= ~PORT_PLS_MASK;
	temp |= PORT_LINK_STROBE | link_state;

	if (pdev->active_port)
		port_num = pdev->active_port->port_num;

	trace_cdnsp_handle_port_status(port_num, readl(port_regs));
	writel(temp, port_regs);
	trace_cdnsp_link_state_changed(port_num, readl(port_regs));
}

static void cdnsp_disable_port(struct cdnsp_device *pdev,
			       __le32 __iomem *port_regs)
{
	u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));

	writel(temp | PORT_PED, port_regs);
}

static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
					__le32 __iomem *port_regs)
{
	u32 portsc = readl(port_regs);

	writel(cdnsp_port_state_to_neutral(portsc) |
	       (portsc & PORT_CHANGE_BITS), port_regs);
}

static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev)
{
	struct cdns *cdns = dev_get_drvdata(pdev->dev);
	__le32 __iomem *reg;
	void __iomem *base;
	u32 offset = 0;
	u32 val;

	if (!cdns->override_apb_timeout)
		return;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
	reg = base + offset + REG_CHICKEN_BITS_3_OFFSET;

	val = le32_to_cpu(readl(reg));
	val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout);
	writel(cpu_to_le32(val), reg);
}

static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
	__le32 __iomem *reg;
	void __iomem *base;
	u32 offset = 0;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
	reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;

	bit = readl(reg) | bit;
	writel(bit, reg);
}

static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
	__le32 __iomem *reg;
	void __iomem *base;
	u32 offset = 0;

	base = &pdev->cap_regs->hc_capbase;
	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
	reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;

	bit = readl(reg) & ~bit;
	writel(bit, reg);
}

/*
 * Disable interrupts and begin the controller halting process.
 */
static void cdnsp_quiesce(struct cdnsp_device *pdev)
{
	u32 halted;
	u32 mask;
	u32 cmd;

	mask = ~(u32)(CDNSP_IRQS);

	halted = readl(&pdev->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~(CMD_R_S | CMD_DEVEN);

	cmd = readl(&pdev->op_regs->command);
	cmd &= mask;
	writel(cmd, &pdev->op_regs->command);
}

/*
 * Force controller into halt state.
212 * 213 * Disable any IRQs and clear the run/stop bit. 214 * Controller will complete any current and actively pipelined transactions, and 215 * should halt within 16 ms of the run/stop bit being cleared. 216 * Read controller Halted bit in the status register to see when the 217 * controller is finished. 218 */ 219 int cdnsp_halt(struct cdnsp_device *pdev) 220 { 221 int ret; 222 u32 val; 223 224 cdnsp_quiesce(pdev); 225 226 ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val, 227 val & STS_HALT, 1, 228 CDNSP_MAX_HALT_USEC); 229 if (ret) { 230 dev_err(pdev->dev, "ERROR: Device halt failed\n"); 231 return ret; 232 } 233 234 pdev->cdnsp_state |= CDNSP_STATE_HALTED; 235 236 return 0; 237 } 238 239 /* 240 * device controller died, register read returns 0xffffffff, or command never 241 * ends. 242 */ 243 void cdnsp_died(struct cdnsp_device *pdev) 244 { 245 dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n"); 246 pdev->cdnsp_state |= CDNSP_STATE_DYING; 247 cdnsp_halt(pdev); 248 } 249 250 /* 251 * Set the run bit and wait for the device to be running. 252 */ 253 static int cdnsp_start(struct cdnsp_device *pdev) 254 { 255 u32 temp; 256 int ret; 257 258 temp = readl(&pdev->op_regs->command); 259 temp |= (CMD_R_S | CMD_DEVEN); 260 writel(temp, &pdev->op_regs->command); 261 262 pdev->cdnsp_state = 0; 263 264 /* 265 * Wait for the STS_HALT Status bit to be 0 to indicate the device is 266 * running. 267 */ 268 ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp, 269 !(temp & STS_HALT), 1, 270 CDNSP_MAX_HALT_USEC); 271 if (ret) { 272 pdev->cdnsp_state = CDNSP_STATE_DYING; 273 dev_err(pdev->dev, "ERROR: Controller run failed\n"); 274 } 275 276 return ret; 277 } 278 279 /* 280 * Reset a halted controller. 281 * 282 * This resets pipelines, timers, counters, state machines, etc. 283 * Transactions will be terminated immediately, and operational registers 284 * will be set to their defaults. 285 */ 286 int cdnsp_reset(struct cdnsp_device *pdev) 287 { 288 u32 command; 289 u32 temp; 290 int ret; 291 292 temp = readl(&pdev->op_regs->status); 293 294 if (temp == ~(u32)0) { 295 dev_err(pdev->dev, "Device not accessible, reset failed.\n"); 296 return -ENODEV; 297 } 298 299 if ((temp & STS_HALT) == 0) { 300 dev_err(pdev->dev, "Controller not halted, aborting reset.\n"); 301 return -EINVAL; 302 } 303 304 command = readl(&pdev->op_regs->command); 305 command |= CMD_RESET; 306 writel(command, &pdev->op_regs->command); 307 308 ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp, 309 !(temp & CMD_RESET), 1, 310 10 * 1000); 311 if (ret) { 312 dev_err(pdev->dev, "ERROR: Controller reset failed\n"); 313 return ret; 314 } 315 316 /* 317 * CDNSP cannot write any doorbells or operational registers other 318 * than status until the "Controller Not Ready" flag is cleared. 319 */ 320 ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp, 321 !(temp & STS_CNR), 1, 322 10 * 1000); 323 324 if (ret) { 325 dev_err(pdev->dev, "ERROR: Controller not ready to work\n"); 326 return ret; 327 } 328 329 dev_dbg(pdev->dev, "Controller ready to work"); 330 331 return ret; 332 } 333 334 /* 335 * cdnsp_get_endpoint_index - Find the index for an endpoint given its 336 * descriptor.Use the return value to right shift 1 for the bitmask. 337 * 338 * Index = (epnum * 2) + direction - 1, 339 * where direction = 0 for OUT, 1 for IN. 
 */
static unsigned int
cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
{
	unsigned int index = (unsigned int)usb_endpoint_num(desc);

	if (usb_endpoint_xfer_control(desc))
		return index * 2;

	return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
}

/*
 * Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int
cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
{
	return 1 << (cdnsp_get_endpoint_index(desc) + 1);
}

int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
{
	struct cdnsp_device *pdev = pep->pdev;
	struct usb_request *request;
	int ret;

	if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
		trace_cdnsp_request_enqueue_busy(preq);
		return -EBUSY;
	}

	request = &preq->request;
	request->actual = 0;
	request->status = -EINPROGRESS;
	preq->direction = pep->direction;
	preq->epnum = pep->number;
	preq->td.drbl = 0;

	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
	if (ret) {
		trace_cdnsp_request_enqueue_error(preq);
		return ret;
	}

	list_add_tail(&preq->list, &pep->pending_list);

	trace_cdnsp_request_enqueue(preq);

	switch (usb_endpoint_type(pep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = cdnsp_queue_ctrl_tx(pdev, preq);
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		ret = cdnsp_queue_bulk_tx(pdev, preq);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = cdnsp_queue_isoc_tx(pdev, preq);
	}

	if (ret)
		goto unmap;

	return 0;

unmap:
	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					pep->direction);
	list_del(&preq->list);
	trace_cdnsp_request_enqueue_error(preq);

	return ret;
}

/*
 * Remove the request's TD from the endpoint ring. This may cause the
 * controller to stop USB transfers, potentially stopping in the middle of a
 * TRB buffer. The controller should pick up where it left off in the TD,
 * unless a Set Transfer Ring Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled request will be "removed"
 * from the ring. Since the ring is a contiguous structure, they can't be
 * physically removed. Instead, there are the following cases:
 *
 * 1) If the controller is in the middle of processing the request to be
 *    canceled, we simply move the ring's dequeue pointer past those TRBs
 *    using the Set Transfer Ring Dequeue Pointer command. This will be
 *    the common case, when drivers timeout on the last submitted request
 *    and attempt to cancel.
 *
 * 2) If the controller is in the middle of a different TD, we turn the TRBs
 *    into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
 *    The controller will need to invalidate any TRBs it has cached after
 *    the stop endpoint command.
 *
 * 3) The TD may have completed by the time the Stop Endpoint Command
 *    completes, so software needs to handle that case too.
442 * 443 */ 444 int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq) 445 { 446 struct cdnsp_device *pdev = pep->pdev; 447 int ret_stop = 0; 448 int ret_rem; 449 450 trace_cdnsp_request_dequeue(preq); 451 452 if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) 453 ret_stop = cdnsp_cmd_stop_ep(pdev, pep); 454 455 ret_rem = cdnsp_remove_request(pdev, preq, pep); 456 457 return ret_rem ? ret_rem : ret_stop; 458 } 459 460 static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev) 461 { 462 struct cdnsp_input_control_ctx *ctrl_ctx; 463 struct cdnsp_slot_ctx *slot_ctx; 464 struct cdnsp_ep_ctx *ep_ctx; 465 int i; 466 467 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 468 469 /* 470 * When a device's add flag and drop flag are zero, any subsequent 471 * configure endpoint command will leave that endpoint's state 472 * untouched. Make sure we don't leave any old state in the input 473 * endpoint contexts. 474 */ 475 ctrl_ctx->drop_flags = 0; 476 ctrl_ctx->add_flags = 0; 477 slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); 478 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 479 480 /* Endpoint 0 is always valid */ 481 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); 482 for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) { 483 ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i); 484 ep_ctx->ep_info = 0; 485 ep_ctx->ep_info2 = 0; 486 ep_ctx->deq = 0; 487 ep_ctx->tx_info = 0; 488 } 489 } 490 491 /* Issue a configure endpoint command and wait for it to finish. */ 492 static int cdnsp_configure_endpoint(struct cdnsp_device *pdev) 493 { 494 int ret; 495 496 cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma); 497 cdnsp_ring_cmd_db(pdev); 498 ret = cdnsp_wait_for_cmd_compl(pdev); 499 if (ret) { 500 dev_err(pdev->dev, 501 "ERR: unexpected command completion code 0x%x.\n", ret); 502 return -EINVAL; 503 } 504 505 return ret; 506 } 507 508 static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev, 509 struct cdnsp_ep *pep) 510 { 511 struct cdnsp_segment *segment; 512 union cdnsp_trb *event; 513 u32 cycle_state; 514 u32 data; 515 516 event = pdev->event_ring->dequeue; 517 segment = pdev->event_ring->deq_seg; 518 cycle_state = pdev->event_ring->cycle_state; 519 520 while (1) { 521 data = le32_to_cpu(event->trans_event.flags); 522 523 /* Check the owner of the TRB. 
		 */
		if ((data & TRB_CYCLE) != cycle_state)
			break;

		if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
		    TRB_TO_EP_ID(data) == (pep->idx + 1)) {
			data |= TRB_EVENT_INVALIDATE;
			event->trans_event.flags = cpu_to_le32(data);
		}

		if (cdnsp_last_trb_on_seg(segment, event)) {
			cycle_state ^= 1;
			segment = pdev->event_ring->deq_seg->next;
			event = segment->trbs;
		} else {
			event++;
		}
	}
}

int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *cmd_trb;
	dma_addr_t cmd_deq_dma;
	union cdnsp_trb *event;
	u32 cycle_state;
	int ret, val;
	u64 cmd_dma;
	u32 flags;

	cmd_trb = pdev->cmd.command_trb;
	pdev->cmd.status = 0;

	trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);

	ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
					!CMD_RING_BUSY(val), 1,
					CDNSP_CMD_TIMEOUT);
	if (ret) {
		dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
		trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
		pdev->cdnsp_state = CDNSP_STATE_DYING;
		return -ETIMEDOUT;
	}

	event = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	cycle_state = pdev->event_ring->cycle_state;

	cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
	if (!cmd_deq_dma)
		return -EINVAL;

	while (1) {
		flags = le32_to_cpu(event->event_cmd.flags);

		/* Check the owner of the TRB. */
		if ((flags & TRB_CYCLE) != cycle_state)
			return -EINVAL;

		cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);

		/*
		 * Check whether the completion event is for last queued
		 * command.
		 */
		if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
		    cmd_dma != (u64)cmd_deq_dma) {
			if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
				event++;
				continue;
			}

			if (cdnsp_last_trb_on_ring(pdev->event_ring,
						   event_deq_seg, event))
				cycle_state ^= 1;

			event_deq_seg = event_deq_seg->next;
			event = event_deq_seg->trbs;
			continue;
		}

		trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);

		pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
		if (pdev->cmd.status == COMP_SUCCESS)
			return 0;

		return -pdev->cmd.status;
	}
}

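/*
 * Set or clear the halt state of an endpoint. Setting halt first stops the
 * endpoint and then issues a Halt Endpoint command; clearing it issues a
 * Reset Endpoint command and, unless the endpoint is wedged, rings the
 * doorbell to restart any transfers still queued on the ring.
 */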
"Set" : "Clear"); 623 624 ret = cdnsp_cmd_stop_ep(pdev, pep); 625 if (ret) 626 return ret; 627 628 if (value) { 629 if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) { 630 cdnsp_queue_halt_endpoint(pdev, pep->idx); 631 cdnsp_ring_cmd_db(pdev); 632 ret = cdnsp_wait_for_cmd_compl(pdev); 633 } 634 635 pep->ep_state |= EP_HALTED; 636 } else { 637 cdnsp_queue_reset_ep(pdev, pep->idx); 638 cdnsp_ring_cmd_db(pdev); 639 ret = cdnsp_wait_for_cmd_compl(pdev); 640 trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx); 641 642 if (ret) 643 return ret; 644 645 pep->ep_state &= ~EP_HALTED; 646 647 if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE)) 648 cdnsp_ring_doorbell_for_active_rings(pdev, pep); 649 650 pep->ep_state &= ~EP_WEDGE; 651 } 652 653 return 0; 654 } 655 656 static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev, 657 struct cdnsp_ep *pep) 658 { 659 struct cdnsp_input_control_ctx *ctrl_ctx; 660 struct cdnsp_slot_ctx *slot_ctx; 661 int ret = 0; 662 u32 ep_sts; 663 int i; 664 665 ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx); 666 667 /* Don't issue the command if there's no endpoints to update. */ 668 if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0) 669 return 0; 670 671 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 672 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 673 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 674 675 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ 676 slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); 677 for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) { 678 __le32 le32 = cpu_to_le32(BIT(i)); 679 680 if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) || 681 (ctrl_ctx->add_flags & le32) || i == 1) { 682 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 683 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); 684 break; 685 } 686 } 687 688 ep_sts = GET_EP_CTX_STATE(pep->out_ctx); 689 690 if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) && 691 ep_sts == EP_STATE_DISABLED) || 692 (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags)) 693 ret = cdnsp_configure_endpoint(pdev); 694 695 trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx)); 696 trace_cdnsp_handle_cmd_config_ep(pep->out_ctx); 697 698 cdnsp_zero_in_ctx(pdev); 699 700 return ret; 701 } 702 703 /* 704 * This submits a Reset Device Command, which will set the device state to 0, 705 * set the device address to 0, and disable all the endpoints except the default 706 * control endpoint. The USB core should come back and call 707 * cdnsp_setup_device(), and then re-set up the configuration. 708 */ 709 int cdnsp_reset_device(struct cdnsp_device *pdev) 710 { 711 struct cdnsp_slot_ctx *slot_ctx; 712 int slot_state; 713 int ret, i; 714 715 slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx); 716 slot_ctx->dev_info = 0; 717 pdev->device_address = 0; 718 719 /* If device is not setup, there is no point in resetting it. */ 720 slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx); 721 slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); 722 trace_cdnsp_reset_device(slot_ctx); 723 724 if (slot_state <= SLOT_STATE_DEFAULT && 725 pdev->eps[0].ep_state & EP_HALTED) { 726 cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0); 727 } 728 729 /* 730 * During Reset Device command controller shall transition the 731 * endpoint ep0 to the Running State. 
732 */ 733 pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED); 734 pdev->eps[0].ep_state |= EP_ENABLED; 735 736 if (slot_state <= SLOT_STATE_DEFAULT) 737 return 0; 738 739 cdnsp_queue_reset_device(pdev); 740 cdnsp_ring_cmd_db(pdev); 741 ret = cdnsp_wait_for_cmd_compl(pdev); 742 743 /* 744 * After Reset Device command all not default endpoints 745 * are in Disabled state. 746 */ 747 for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) 748 pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED; 749 750 trace_cdnsp_handle_cmd_reset_dev(slot_ctx); 751 752 if (ret) 753 dev_err(pdev->dev, "Reset device failed with error code %d", 754 ret); 755 756 return ret; 757 } 758 759 /* 760 * Sets the MaxPStreams field and the Linear Stream Array field. 761 * Sets the dequeue pointer to the stream context array. 762 */ 763 static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev, 764 struct cdnsp_ep_ctx *ep_ctx, 765 struct cdnsp_stream_info *stream_info) 766 { 767 u32 max_primary_streams; 768 769 /* MaxPStreams is the number of stream context array entries, not the 770 * number we're actually using. Must be in 2^(MaxPstreams + 1) format. 771 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. 772 */ 773 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; 774 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); 775 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) 776 | EP_HAS_LSA); 777 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); 778 } 779 780 /* 781 * The drivers use this function to prepare a bulk endpoints to use streams. 782 * 783 * Don't allow the call to succeed if endpoint only supports one stream 784 * (which means it doesn't support streams at all). 785 */ 786 int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep) 787 { 788 unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc); 789 unsigned int num_stream_ctxs; 790 int ret; 791 792 if (num_streams == 0) 793 return 0; 794 795 if (num_streams > STREAM_NUM_STREAMS) 796 return -EINVAL; 797 798 /* 799 * Add two to the number of streams requested to account for 800 * stream 0 that is reserved for controller usage and one additional 801 * for TASK SET FULL response. 802 */ 803 num_streams += 2; 804 805 /* The stream context array size must be a power of two */ 806 num_stream_ctxs = roundup_pow_of_two(num_streams); 807 808 trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams); 809 810 ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams); 811 if (ret) 812 return ret; 813 814 cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info); 815 816 pep->ep_state |= EP_HAS_STREAMS; 817 pep->stream_info.td_count = 0; 818 pep->stream_info.first_prime_det = 0; 819 820 /* Subtract 1 for stream 0, which drivers can't use. 
	trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);

	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
	if (ret)
		return ret;

	cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);

	pep->ep_state |= EP_HAS_STREAMS;
	pep->stream_info.td_count = 0;
	pep->stream_info.first_prime_det = 0;

	/* Subtract 1 for stream 0, which drivers can't use. */
	return num_streams - 1;
}

int cdnsp_disable_slot(struct cdnsp_device *pdev)
{
	int ret;

	cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	pdev->slot_id = 0;
	pdev->active_port = NULL;

	trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
	memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);

	return ret;
}

int cdnsp_enable_slot(struct cdnsp_device *pdev)
{
	struct cdnsp_slot_ctx *slot_ctx;
	int slot_state;
	int ret;

	/* If the slot is already enabled, there is nothing to do. */
	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	if (slot_state != SLOT_STATE_DISABLED)
		return 0;

	cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);
	if (ret)
		goto show_trace;

	pdev->slot_id = 1;

show_trace:
	trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));

	return ret;
}

/*
 * Issue an Address Device command with BSR=1 if setup is SETUP_CONTEXT_ONLY,
 * or with BSR=0 if setup is SETUP_CONTEXT_ADDRESS.
 */
int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_slot_ctx *slot_ctx;
	int dev_state = 0;
	int ret;

	if (!pdev->slot_id) {
		trace_cdnsp_slot_id("incorrect");
		return -EINVAL;
	}

	if (!pdev->active_port->port_num)
		return -EINVAL;

	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
	dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));

	if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
		trace_cdnsp_slot_already_in_default(slot_ctx);
		return 0;
	}

	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);

	if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
		ret = cdnsp_setup_addressable_priv_dev(pdev);
		if (ret)
			return ret;
	}

	cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);

	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	trace_cdnsp_setup_device_slot(slot_ctx);

	cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
	cdnsp_ring_cmd_db(pdev);
	ret = cdnsp_wait_for_cmd_compl(pdev);

	trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));

	/* Zero the input context control for later use. */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	return ret;
}

void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
				 struct usb_request *req,
				 int enable)
{
	if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
		return;

	trace_cdnsp_lpm(enable);

	if (enable)
		writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
		       &pdev->active_port->regs->portpmsc);
	else
		writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
}

static int cdnsp_get_frame(struct cdnsp_device *pdev)
{
	return readl(&pdev->run_regs->microframe_index) >> 3;
}

static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 added_ctxs;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    !desc->wMaxPacketSize)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;
	pep->ep_state &= ~EP_UNCONFIGURED;

	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
			  "%s is already enabled\n", pep->name))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);

	added_ctxs = cdnsp_get_endpoint_flag(desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
		ret = -EINVAL;
		goto unlock;
	}

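	/*
	 * bInterval is log2-encoded for high- and super-speed endpoints
	 * (interval = 2^(bInterval - 1) microframes of 125 us). At full
	 * speed, interrupt endpoints give the interval directly in 1 ms
	 * frames, while isochronous endpoints keep the log2 encoding in
	 * frames; both are shifted left by 3 to convert frames to
	 * microframes.
	 */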
	pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	if (pdev->gadget.speed == USB_SPEED_FULL) {
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
			pep->interval = desc->bInterval << 3;
		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
			pep->interval = BIT(desc->bInterval - 1) << 3;
	}

	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
		if (pep->interval > BIT(12)) {
			dev_err(pdev->dev, "bInterval %d not supported\n",
				desc->bInterval);
			ret = -EINVAL;
			goto unlock;
		}
		cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	}

	ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
	if (ret)
		goto unlock;

	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
	ctrl_ctx->drop_flags = 0;

	ret = cdnsp_update_eps_configuration(pdev, pep);
	if (ret) {
		cdnsp_free_endpoint_rings(pdev, pep);
		goto unlock;
	}

	pep->ep_state |= EP_ENABLED;
	pep->ep_state &= ~EP_STOPPED;

unlock:
	trace_cdnsp_ep_enable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdnsp_input_control_ctx *ctrl_ctx;
	struct cdnsp_request *preq;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	u32 drop_flag;
	int ret = 0;

	if (!ep)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	spin_lock_irqsave(&pdev->lock, flags);

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s is already disabled\n", pep->name);
		ret = -EINVAL;
		goto finish;
	}

	pep->ep_state |= EP_DIS_IN_RROGRESS;

	/* Endpoint was unconfigured by the Reset Device command. */
	if (!(pep->ep_state & EP_UNCONFIGURED))
		cdnsp_cmd_stop_ep(pdev, pep);

	/* Remove all queued USB requests. */
	while (!list_empty(&pep->pending_list)) {
		preq = next_request(&pep->pending_list);
		cdnsp_ep_dequeue(pep, preq);
	}

	cdnsp_invalidate_ep_events(pdev, pep);

	pep->ep_state &= ~EP_DIS_IN_RROGRESS;
	drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
	ctrl_ctx->add_flags = 0;

	cdnsp_endpoint_zero(pdev, pep);

	if (!(pep->ep_state & EP_UNCONFIGURED))
		ret = cdnsp_update_eps_configuration(pdev, pep);

	cdnsp_free_endpoint_rings(pdev, pep);

	pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
	pep->ep_state |= EP_STOPPED;

finish:
	trace_cdnsp_ep_disable_end(pep, 0);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
							 gfp_t gfp_flags)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_request *preq;

	preq = kzalloc(sizeof(*preq), gfp_flags);
	if (!preq)
		return NULL;

	preq->epnum = pep->number;
	preq->pep = pep;

	trace_cdnsp_alloc_request(preq);

	return &preq->request;
}

static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
					 struct usb_request *request)
{
	struct cdnsp_request *preq = to_cdnsp_request(request);

	trace_cdnsp_free_request(preq);
	kfree(preq);
}

static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
				 struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct cdnsp_request *preq;
	struct cdnsp_device *pdev;
	struct cdnsp_ep *pep;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	pep = to_cdnsp_ep(ep);
	pdev = pep->pdev;

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
			pep->name);
		return -EINVAL;
	}

	preq = to_cdnsp_request(request);
	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_enqueue(pep, preq);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
				   struct usb_request *request)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags;
	int ret;

	if (request->status != -EINPROGRESS)
		return 0;

	if (!pep->endpoint.desc) {
		dev_err(pdev->dev,
			"%s: can't dequeue from disabled endpoint\n",
			pep->name);
		return -ESHUTDOWN;
	}

	/* Requests have already been dequeued while disabling the endpoint. */
	if (!(pep->ep_state & EP_ENABLED))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);
	ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	struct cdnsp_request *preq;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);

	preq = next_request(&pep->pending_list);
	if (value) {
		if (preq) {
			trace_cdnsp_ep_busy_try_halt_again(pep, 0);
			ret = -EAGAIN;
			goto done;
		}
	}

	ret = cdnsp_halt_endpoint(pdev, pep, value);

done:
	spin_unlock_irqrestore(&pdev->lock, flags);
	return ret;
}

static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
	struct cdnsp_device *pdev = pep->pdev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pep->ep_state |= EP_WEDGE;
	ret = cdnsp_halt_endpoint(pdev, pep, 1);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
	.enable = cdnsp_gadget_ep_enable,
	.disable = cdnsp_gadget_ep_disable,
	.alloc_request = cdnsp_gadget_ep_alloc_request,
	.free_request = cdnsp_gadget_ep_free_request,
	.queue = cdnsp_gadget_ep_queue,
	.dequeue = cdnsp_gadget_ep_dequeue,
	.set_halt = cdnsp_gadget_ep_set_halt,
	.set_wedge = cdnsp_gadget_ep_set_wedge,
};

static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
	.enable = cdnsp_gadget_ep_enable,
	.disable = cdnsp_gadget_ep_disable,
	.alloc_request = cdnsp_gadget_ep_alloc_request,
	.free_request = cdnsp_gadget_ep_free_request,
	.queue = cdnsp_gadget_ep_queue,
	.dequeue = cdnsp_gadget_ep_dequeue,
	.set_halt = cdnsp_gadget_ep_set_halt,
	.set_wedge = cdnsp_gadget_ep_set_wedge,
};

void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
			   struct cdnsp_request *preq,
			   int status)
{
	struct cdnsp_device *pdev = pep->pdev;

	list_del(&preq->list);

	if (preq->request.status == -EINPROGRESS)
		preq->request.status = status;

	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
					preq->direction);

	trace_cdnsp_request_giveback(preq);

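	/*
	 * Give back every request except the internal ep0 one. The lock is
	 * dropped across the completion callback, since the gadget driver
	 * may call back into the UDC, e.g., to queue the next request.
	 */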
	if (preq != &pdev->ep0_preq) {
		spin_unlock(&pdev->lock);
		usb_gadget_giveback_request(&pep->endpoint, &preq->request);
		spin_lock(&pdev->lock);
	}
}

static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static int cdnsp_run(struct cdnsp_device *pdev,
		     enum usb_device_speed speed)
{
	u32 fs_speed = 0;
	u32 temp;
	int ret;

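	/*
	 * Program the interrupt moderation interval. The divide by 250
	 * suggests the hardware counts this field in 250 ns units, as in
	 * xHCI, with IMOD_DEFAULT_INTERVAL given in nanoseconds.
	 */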
	temp = readl(&pdev->ir_set->irq_control);
	temp &= ~IMOD_INTERVAL_MASK;
	temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
	writel(temp, &pdev->ir_set->irq_control);

	temp = readl(&pdev->port3x_regs->mode_addr);

	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		temp |= CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_SUPER:
		temp &= ~CFG_3XPORT_SSP_SUPPORT;
		break;
	case USB_SPEED_HIGH:
		break;
	case USB_SPEED_FULL:
		fs_speed = PORT_REG6_FORCE_FS;
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to superspeed. */
		speed = USB_SPEED_SUPER;
		break;
	}

	if (speed >= USB_SPEED_SUPER) {
		writel(temp, &pdev->port3x_regs->mode_addr);
		cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
				     XDEV_RXDETECT);
	} else {
		cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	}

	cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
			     XDEV_RXDETECT);

	cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);

	writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);

	ret = cdnsp_start(pdev);
	if (ret) {
		ret = -ENODEV;
		goto err;
	}

	temp = readl(&pdev->op_regs->command);
	temp |= (CMD_INTE);
	writel(temp, &pdev->op_regs->command);

	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);

	trace_cdnsp_init("Controller ready to work");
	return 0;
err:
	cdnsp_halt(pdev);
	return ret;
}

static int cdnsp_gadget_udc_start(struct usb_gadget *g,
				  struct usb_gadget_driver *driver)
{
	enum usb_device_speed max_speed = driver->max_speed;
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget_driver = driver;

	/* Limit speed if necessary. */
	max_speed = min(driver->max_speed, g->max_speed);
	ret = cdnsp_run(pdev, max_speed);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

/*
 * Update Event Ring Dequeue Pointer:
 * - When all events have finished
 * - To avoid "Event Ring Full Error" condition
 */
void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
			       union cdnsp_trb *event_ring_deq,
			       u8 clear_ehb)
{
	u64 temp_64;
	dma_addr_t deq;

	temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);

	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != pdev->event_ring->dequeue) {
		deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
					    pdev->event_ring->dequeue);
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C). */
	if (clear_ehb)
		temp_64 |= ERST_EHB;
	else
		temp_64 &= ~ERST_EHB;

	cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
}

static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *seg;
	u64 val_64;
	int i;

	cdnsp_initialize_ring_info(pdev->cmd_ring);

	seg = pdev->cmd_ring->first_seg;
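	/*
	 * Zero only TRBS_PER_SEGMENT - 1 TRBs per segment: the last TRB in
	 * each segment is a link TRB that must keep pointing to the next
	 * segment of the ring.
	 */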
	for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
		memset(seg->trbs, 0,
		       sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
		seg = seg->next;
	}

	/* Set the address in the Command Ring Control register. */
	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
		 pdev->cmd_ring->cycle_state;
	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
}

static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
{
	struct cdnsp_segment *event_deq_seg;
	union cdnsp_trb *event_ring_deq;
	union cdnsp_trb *event;
	u32 cycle_bit;

	event_ring_deq = pdev->event_ring->dequeue;
	event_deq_seg = pdev->event_ring->deq_seg;
	event = pdev->event_ring->dequeue;

	/* Update ring dequeue pointer. */
	while (1) {
		cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);

		/* Does the controller or driver own the TRB? */
		if (cycle_bit != pdev->event_ring->cycle_state)
			break;

		cdnsp_inc_deq(pdev, pdev->event_ring);

		if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
			event++;
			continue;
		}

		if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
					   event))
			cycle_bit ^= 1;

		event_deq_seg = event_deq_seg->next;
		event = event_deq_seg->trbs;
	}

	cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
}

static void cdnsp_stop(struct cdnsp_device *pdev)
{
	u32 temp;

	/* Remove internally queued request for ep0. */
	if (!list_empty(&pdev->eps[0].pending_list)) {
		struct cdnsp_request *req;

		req = next_request(&pdev->eps[0].pending_list);
		if (req == &pdev->ep0_preq)
			cdnsp_ep_dequeue(&pdev->eps[0], req);
	}

	cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
	cdnsp_disable_slot(pdev);
	cdnsp_halt(pdev);

	temp = readl(&pdev->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
	temp = readl(&pdev->ir_set->irq_pending);
	writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);

	cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
	cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);

	/* Clear interrupt line */
	temp = readl(&pdev->ir_set->irq_pending);
	temp |= IMAN_IP;
	writel(temp, &pdev->ir_set->irq_pending);

	cdnsp_consume_all_events(pdev);
	cdnsp_clear_cmd_ring(pdev);

	trace_cdnsp_exit("Controller stopped.");
}

/*
 * Stop controller.
 * This function is called by the gadget core when the driver is removed.
 * Disable slot, disable IRQs, and quiesce the controller.
 */
static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	cdnsp_stop(pdev);
	pdev->gadget_driver = NULL;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_get_frame(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);

	return cdnsp_get_frame(pdev);
}

static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;
	u32 portpm, portsc;

	port_regs = pdev->active_port->regs;
	portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;

	/*
	 * Remote wakeup feature is not enabled by host.
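	 * For a USB 2.0 link in U2, remote wakeup is gated by PORT_RWE in
	 * PORTPMSC; for a USB 3.x link in U3, it is tracked by the Function
	 * Remote Wakeup feature in pdev->may_wakeup.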
	 */
	if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
		portpm = readl(&port_regs->portpmsc);

		if (!(portpm & PORT_RWE))
			return;
	}

	if (portsc == XDEV_U3 && !pdev->may_wakeup)
		return;

	cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);

	pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
}

static int cdnsp_gadget_wakeup(struct usb_gadget *g)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	__cdnsp_gadget_wakeup(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
					int is_selfpowered)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	g->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
	struct cdns *cdns = dev_get_drvdata(pdev->dev);
	unsigned long flags;

	trace_cdnsp_pullup(is_on);

	/*
	 * Disable events handling while controller is being
	 * enabled/disabled.
	 */
	disable_irq(cdns->dev_irq);
	spin_lock_irqsave(&pdev->lock, flags);

	if (!is_on) {
		cdnsp_reset_device(pdev);
		cdns_clear_vbus(cdns);
	} else {
		cdns_set_vbus(cdns);
	}

	spin_unlock_irqrestore(&pdev->lock, flags);
	enable_irq(cdns->dev_irq);

	return 0;
}

static const struct usb_gadget_ops cdnsp_gadget_ops = {
	.get_frame = cdnsp_gadget_get_frame,
	.wakeup = cdnsp_gadget_wakeup,
	.set_selfpowered = cdnsp_gadget_set_selfpowered,
	.pullup = cdnsp_gadget_pullup,
	.udc_start = cdnsp_gadget_udc_start,
	.udc_stop = cdnsp_gadget_udc_stop,
};

static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;
	int endpoints;

	reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);

	if (!pep->direction) {
		pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
		pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
		pep->buffering = (pep->buffering + 1) / 2;
		pep->buffering_period = (pep->buffering_period + 1) / 2;
		return;
	}

	endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;

	/* Set to XBUF_TX_TAG_MASK_0 register. */
	reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
	/* Set reg to XBUF_TX_TAG_MASK_N related with this endpoint. */
	reg += pep->number * sizeof(u32) * 2;

	pep->buffering = (readl(reg) + 1) / 2;
	pep->buffering_period = pep->buffering;
}

static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
{
	int max_streams = HCC_MAX_PSA(pdev->hcc_params);
	struct cdnsp_ep *pep;
	int i;

	INIT_LIST_HEAD(&pdev->gadget.ep_list);

	if (max_streams < STREAM_LOG_STREAMS) {
		dev_err(pdev->dev, "Stream size %d not supported\n",
			max_streams);
		return -EINVAL;
	}

	max_streams = STREAM_LOG_STREAMS;

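	/*
	 * pdev->eps[] holds ep0 at index 0, followed by ep1out, ep1in,
	 * ep2out, ep2in, ..., so each OUT endpoint precedes its IN sibling.
	 */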
	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		bool direction = !(i & 1); /* Start from OUT endpoint. */
		u8 epnum = ((i + 1) >> 1);

		if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
			continue;

		pep = &pdev->eps[i];
		pep->pdev = pdev;
		pep->number = epnum;
		pep->direction = direction; /* 0 for OUT, 1 for IN. */

		/*
		 * Ep0 is bidirectional, so ep0in and ep0out are represented by
		 * pdev->eps[0]
		 */
		if (epnum == 0) {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, "BiDir");

			pep->idx = 0;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
			pep->endpoint.maxburst = 1;
			pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
			pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
			pep->endpoint.comp_desc = NULL;
			pep->endpoint.caps.type_control = true;
			pep->endpoint.caps.dir_in = true;
			pep->endpoint.caps.dir_out = true;

			pdev->ep0_preq.epnum = pep->number;
			pdev->ep0_preq.pep = pep;
			pdev->gadget.ep0 = &pep->endpoint;
		} else {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, (pep->direction) ? "in" : "out");

			pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);

			pep->endpoint.max_streams = max_streams;
			pep->endpoint.ops = &cdnsp_gadget_ep_ops;
			list_add_tail(&pep->endpoint.ep_list,
				      &pdev->gadget.ep_list);

			pep->endpoint.caps.type_iso = true;
			pep->endpoint.caps.type_bulk = true;
			pep->endpoint.caps.type_int = true;

			pep->endpoint.caps.dir_in = direction;
			pep->endpoint.caps.dir_out = !direction;
		}

		pep->endpoint.name = pep->name;
		pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
		pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
		cdnsp_get_ep_buffering(pdev, pep);

		dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
			"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
			"SupDir IN: %s, OUT: %s\n",
			pep->name, 1024,
			str_yes_no(pep->endpoint.caps.type_control),
			str_yes_no(pep->endpoint.caps.type_int),
			str_yes_no(pep->endpoint.caps.type_bulk),
			str_yes_no(pep->endpoint.caps.type_iso),
			str_yes_no(pep->endpoint.caps.dir_in),
			str_yes_no(pep->endpoint.caps.dir_out));

		INIT_LIST_HEAD(&pep->pending_list);
	}

	return 0;
}

static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
{
	struct cdnsp_ep *pep;
	int i;

	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
		pep = &pdev->eps[i];
		if (pep->number != 0 && pep->out_ctx)
			list_del(&pep->endpoint.ep_list);
	}
}

void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
{
	pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;

	if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->disconnect(&pdev->gadget);
		spin_lock(&pdev->lock);
	}

	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);

	pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
}

void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->suspend(&pdev->gadget);
		spin_lock(&pdev->lock);
	}
}

void cdnsp_resume_gadget(struct cdnsp_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->resume) {
		spin_unlock(&pdev->lock);
		pdev->gadget_driver->resume(&pdev->gadget);
		spin_lock(&pdev->lock);
	}
}

void cdnsp_irq_reset(struct cdnsp_device *pdev)
{
	struct cdnsp_port_regs __iomem *port_regs;

	cdnsp_reset_device(pdev);

	port_regs = pdev->active_port->regs;
	pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));

	spin_unlock(&pdev->lock);
	usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
	spin_lock(&pdev->lock);

	switch (pdev->gadget.speed) {
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
		pdev->gadget.ep0->maxpacket = 512;
		break;
	case USB_SPEED_HIGH:
	case USB_SPEED_FULL:
		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
		pdev->gadget.ep0->maxpacket = 64;
		break;
	default:
		/* Low speed is not supported. */
		dev_err(pdev->dev, "Unknown device speed\n");
		break;
	}

	cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
	cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
	usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
}

static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
{
	void __iomem *reg = &pdev->cap_regs->hc_capbase;

	reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
	pdev->rev_cap = reg;

	pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision);

	dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
		 readl(&pdev->rev_cap->ctrl_revision),
		 readl(&pdev->rev_cap->rtl_revision),
		 readl(&pdev->rev_cap->ep_supported),
		 readl(&pdev->rev_cap->rx_buff_size),
		 readl(&pdev->rev_cap->tx_buff_size));
}

static int cdnsp_gen_setup(struct cdnsp_device *pdev)
{
	int ret;
	u32 reg;

	pdev->cap_regs = pdev->regs;
	pdev->op_regs = pdev->regs +
		HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
	pdev->run_regs = pdev->regs +
		(readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);

	/* Cache read-only capability registers. */
	pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
	pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
	pdev->hci_version = HC_VERSION(pdev->hcc_params);
	pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);

	/*
	 * Override the APB timeout value to give the controller more time for
	 * enabling UTMI clock and synchronizing APB and UTMI clock domains.
	 * This fix is platform specific and is required to fix an issue with
	 * reading an incorrect value from the PORTSC register after resuming
	 * from the L1 state.
	 */
	cdnsp_set_apb_timeout_value(pdev);

	cdnsp_get_rev_cap(pdev);

	/* Make sure the Device Controller is halted. */
	ret = cdnsp_halt(pdev);
	if (ret)
		return ret;

	/* Reset the internal controller memory state and registers. */
	ret = cdnsp_reset(pdev);
	if (ret)
		return ret;

	/*
	 * Set dma_mask and coherent_dma_mask to 64-bits,
	 * if controller supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
	}

	spin_lock_init(&pdev->lock);

	ret = cdnsp_mem_init(pdev);
	if (ret)
		return ret;

	/*
	 * Software workaround for U1: after transition to U1 the controller
	 * starts gating the clock, and in some cases this causes the
	 * controller to get stuck.
	 */
	reg = readl(&pdev->port3x_regs->mode_2);
	reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
	writel(reg, &pdev->port3x_regs->mode_2);

	return 0;
}

static int __cdnsp_gadget_init(struct cdns *cdns)
{
	struct cdnsp_device *pdev;
	u32 max_speed;
	int ret = -ENOMEM;

	cdns_drd_gadget_on(cdns);

	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pm_runtime_get_sync(cdns->dev);

	cdns->gadget_dev = pdev;
	pdev->dev = cdns->dev;
	pdev->regs = cdns->dev_regs;
	max_speed = usb_get_maximum_speed(cdns->dev);

	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		break;
	default:
		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to SSP */
		max_speed = USB_SPEED_SUPER_PLUS;
		break;
	}

	pdev->gadget.ops = &cdnsp_gadget_ops;
	pdev->gadget.name = "cdnsp-gadget";
	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	pdev->gadget.sg_supported = 1;
	pdev->gadget.max_speed = max_speed;
	pdev->gadget.lpm_capable = 1;

	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
	if (!pdev->setup_buf)
		goto free_pdev;

	/*
	 * The controller supports unaligned OUT buffers, but aligning them
	 * improves performance.
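	 * With this quirk set, gadget drivers (via usb_ep_align_maybe())
	 * round OUT transfer lengths up to a multiple of the endpoint's
	 * wMaxPacketSize.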
1931 */ 1932 pdev->gadget.quirk_ep_out_aligned_size = true; 1933 1934 ret = cdnsp_gen_setup(pdev); 1935 if (ret) { 1936 dev_err(pdev->dev, "Generic initialization failed %d\n", ret); 1937 goto free_setup; 1938 } 1939 1940 ret = cdnsp_gadget_init_endpoints(pdev); 1941 if (ret) { 1942 dev_err(pdev->dev, "failed to initialize endpoints\n"); 1943 goto halt_pdev; 1944 } 1945 1946 ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget); 1947 if (ret) { 1948 dev_err(pdev->dev, "failed to register udc\n"); 1949 goto free_endpoints; 1950 } 1951 1952 ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq, 1953 cdnsp_irq_handler, 1954 cdnsp_thread_irq_handler, IRQF_SHARED, 1955 dev_name(pdev->dev), pdev); 1956 if (ret) 1957 goto del_gadget; 1958 1959 return 0; 1960 1961 del_gadget: 1962 usb_del_gadget_udc(&pdev->gadget); 1963 free_endpoints: 1964 cdnsp_gadget_free_endpoints(pdev); 1965 halt_pdev: 1966 cdnsp_halt(pdev); 1967 cdnsp_reset(pdev); 1968 cdnsp_mem_cleanup(pdev); 1969 free_setup: 1970 kfree(pdev->setup_buf); 1971 free_pdev: 1972 kfree(pdev); 1973 1974 return ret; 1975 } 1976 1977 static void cdnsp_gadget_exit(struct cdns *cdns) 1978 { 1979 struct cdnsp_device *pdev = cdns->gadget_dev; 1980 1981 devm_free_irq(pdev->dev, cdns->dev_irq, pdev); 1982 pm_runtime_mark_last_busy(cdns->dev); 1983 pm_runtime_put_autosuspend(cdns->dev); 1984 usb_del_gadget_udc(&pdev->gadget); 1985 cdnsp_gadget_free_endpoints(pdev); 1986 cdnsp_mem_cleanup(pdev); 1987 kfree(pdev); 1988 cdns->gadget_dev = NULL; 1989 cdns_drd_gadget_off(cdns); 1990 } 1991 1992 static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup) 1993 { 1994 struct cdnsp_device *pdev = cdns->gadget_dev; 1995 unsigned long flags; 1996 1997 if (pdev->link_state == XDEV_U3) 1998 return 0; 1999 2000 spin_lock_irqsave(&pdev->lock, flags); 2001 cdnsp_disconnect_gadget(pdev); 2002 cdnsp_stop(pdev); 2003 spin_unlock_irqrestore(&pdev->lock, flags); 2004 2005 return 0; 2006 } 2007 2008 static int cdnsp_gadget_resume(struct cdns *cdns, bool lost_power) 2009 { 2010 struct cdnsp_device *pdev = cdns->gadget_dev; 2011 enum usb_device_speed max_speed; 2012 unsigned long flags; 2013 int ret; 2014 2015 if (!pdev->gadget_driver) 2016 return 0; 2017 2018 spin_lock_irqsave(&pdev->lock, flags); 2019 max_speed = pdev->gadget_driver->max_speed; 2020 2021 /* Limit speed if necessary. */ 2022 max_speed = min(max_speed, pdev->gadget.max_speed); 2023 2024 ret = cdnsp_run(pdev, max_speed); 2025 2026 if (pdev->link_state == XDEV_U3) 2027 __cdnsp_gadget_wakeup(pdev); 2028 2029 spin_unlock_irqrestore(&pdev->lock, flags); 2030 2031 return ret; 2032 } 2033 2034 /** 2035 * cdnsp_gadget_init - initialize device structure 2036 * @cdns: cdnsp instance 2037 * 2038 * This function initializes the gadget. 2039 */ 2040 int cdnsp_gadget_init(struct cdns *cdns) 2041 { 2042 struct cdns_role_driver *rdrv; 2043 2044 rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); 2045 if (!rdrv) 2046 return -ENOMEM; 2047 2048 rdrv->start = __cdnsp_gadget_init; 2049 rdrv->stop = cdnsp_gadget_exit; 2050 rdrv->suspend = cdnsp_gadget_suspend; 2051 rdrv->resume = cdnsp_gadget_resume; 2052 rdrv->state = CDNS_ROLE_STATE_INACTIVE; 2053 rdrv->name = "gadget"; 2054 cdns->roles[USB_ROLE_DEVICE] = rdrv; 2055 2056 return 0; 2057 } 2058