// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/units.h>

#include "sb_regs.h"
#include "tb.h"

/* Retry count and maximum payload size (in dwords) for data transfers */
#define USB4_DATA_RETRIES		3
#define USB4_DATA_DWORDS		16

/* NVM read router operation metadata layout */
#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

/* NVM set offset shares the offset field layout with NVM read */
#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

/* DROM read router operation metadata layout */
#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

/* Buffer allocation router operation metadata/entry layout */
#define USB4_BA_LENGTH_MASK		GENMASK(7, 0)
#define USB4_BA_INDEX_MASK		GENMASK(15, 0)

/* Buffer allocation parameter indices returned by the router */
enum usb4_ba_index {
	USB4_BA_MAX_USB3 = 0x1,
	USB4_BA_MIN_DP_AUX = 0x2,
	USB4_BA_MIN_DP_MAIN = 0x3,
	USB4_BA_MAX_PCIE = 0x4,
	USB4_BA_MAX_HI = 0x5,
};

#define USB4_BA_VALUE_MASK		GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT		16

/* Delays in us used with usb4_port_wait_for_bit() */
#define USB4_PORT_DELAY			50
#define USB4_PORT_SB_DELAY		1000

/*
 * Runs a USB4 router operation natively through the router config
 * space. Optional @metadata (ROUTER_CS_25) and @tx_data (ROUTER_CS_9)
 * are written before the operation is started, and @metadata/@rx_data
 * are read back once it completes. The status code the router reports
 * is placed in @status if non-NULL.
 */
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	/* Writing the opcode with OV set starts the operation */
	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* The router clears OV once the operation has completed */
	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* ONS set means the router does not support this opcode */
	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}

static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	/*
	 * If the connection manager implementation provides USB4 router
	 * operation proxy callback, call it here instead of running the
	 * operation natively.
	 */
	if (cm_ops->usb4_switch_op) {
		int ret;

		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
					     tx_data, tx_dwords, rx_data,
					     rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;

		/*
		 * If the proxy was not supported then run the native
		 * router operation instead.
		 */
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}

/* Router operation without any data payload */
static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}

/* Router operation with TX and/or RX data payload */
static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}

/**
 * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
 * @sw: Router whose wakes to check
 *
 * Checks wakes occurred during suspend and notify the PM core about them.
 */
void usb4_switch_check_wakes(struct tb_switch *sw)
{
	bool wakeup_usb4 = false;
	struct usb4_port *usb4;
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	/* PCIe/USB3 wake status lives in ROUTER_CS_6, device routers only */
	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/*
	 * Check for any downstream ports for USB4 wake,
	 * connection wake and disconnection wake.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!port->cap_usb4)
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no",
			    (val & PORT_CS_18_WOCS) ? "yes" : "no",
			    (val & PORT_CS_18_WODS) ? "yes" : "no");

		wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
				     PORT_CS_18_WODS);

		/* Report the wake on the port device if it may wake up */
		usb4 = port->usb4;
		if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
			pm_wakeup_event(&usb4->dev, 0);

		wakeup |= wakeup_usb4;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}

/*
 * Returns true if the link below @port was brought up as USB4 (TCM bit
 * clear in PORT_CS_18), false for TBT3 compatibility mode or on read
 * failure.
 */
static bool link_is_usb4(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	if (tb_port_read(port, &val, TB_CFG_PORT,
			 port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !(val & PORT_CS_18_TCM);
}

/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 *
 * This does not set the configuration valid bit of the router. To do
 * that call usb4_switch_configuration_valid().
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	/* Nothing to do for the host router */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	down = tb_switch_downstream_port(sw);
	sw->link_usb4 = link_is_usb4(down);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* USB 3.x tunneling needs a USB4 link and a parent USB3 adapter */
	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val &= ~ROUTER_CS_5_CNS;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}

/**
 * usb4_switch_configuration_valid() - Set tunneling configuration to be valid
 * @sw: USB4 router
 *
 * Sets configuration valid bit for the router. Must be called before
 * any tunnels can be set through the router and after
 * usb4_switch_setup() has been called. Can be called to host and device
 * routers (does nothing for the latter).
 *
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_switch_configuration_valid(struct tb_switch *sw)
{
	u32 val;
	int ret;

	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Wait for the router to acknowledge with the CR bit */
	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
				      ROUTER_CS_6_CR, 50);
}
/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	/* UID is two dwords starting at ROUTER_CS_7 */
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}

/*
 * Block read callback for tb_nvm_read_data(): reads @dwords dwords of
 * DROM starting at dword address @dwaddress using the DROM_READ router
 * operation.
 */
static int usb4_switch_drom_read_block(void *data,
				       unsigned int dwaddress, void *buf,
				       size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	/* Pack size and address into the operation metadata */
	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		USB4_DROM_ADDRESS_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_drom_read_block, sw);
}
/**
 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 * @sw: USB4 router
 *
 * Checks whether conditions are met so that lane bonding can be
 * established with the upstream router. Call only for device routers.
 */
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int ret;
	u32 val;

	up = tb_upstream_port(sw);
	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	/* BE bit tells whether bonding is enabled on the link */
	return !!(val & PORT_CS_18_BE);
}

/**
 * usb4_switch_set_wake() - Enable/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 *
 * Enables/disables router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct usb4_port *usb4;
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		/* Host router (route == 0) skips its upstream port */
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			bool configured = val & PORT_CS_19_PC;
			usb4 = port->usb4;

			/* Connect wake only on unconfigured ports ... */
			if (((flags & TB_WAKE_ON_CONNECT) &&
			      device_may_wakeup(&usb4->dev)) && !configured)
				val |= PORT_CS_19_WOC;
			/* ... disconnect/USB4 wake only on configured ones */
			if (((flags & TB_WAKE_ON_DISCONNECT) &&
			      device_may_wakeup(&usb4->dev)) && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
				      ROUTER_CS_6_SLPR, 500);
}

/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
			     &status);
	if (ret)
		return ret;

	/* Status 0x2 means the operation is not supported */
	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

/*
 * Block read callback for tb_nvm_read_data(): reads @dwords dwords of
 * NVM starting at dword address @dwaddress using the NVM_READ router
 * operation.
 */
static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		   USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}

/**
 * usb4_switch_nvm_set_offset() - Set NVM write offset
 * @sw: USB4 router
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this
 * is done automatically by usb4_switch_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	/* The offset is expressed in dwords */
	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
			     &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/*
 * Block write callback for tb_nvm_write_data(): writes the next
 * @dwords dwords using the NVM_WRITE router operation. The write
 * offset has been set beforehand with usb4_switch_nvm_set_offset().
 */
static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_switch_nvm_write_next_block, sw);
}

/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers NVM authentication process. The router gets power
 * cycled and if the authentication is successful the new NVM starts
 * running. In case of failure returns negative errno.
 *
 * The caller should call usb4_switch_nvm_authenticate_status() to read
 * the status of the authentication after power cycle. It should be the
 * first router operation to avoid the status being lost.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
	switch (ret) {
	/*
	 * The router is power cycled once NVM_AUTH is started so it is
	 * expected to get any of the following errors back.
	 */
	case -EACCES:
	case -ENOTCONN:
	case -ETIMEDOUT:
		return 0;

	default:
		return ret;
	}
}

/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	/* Let the connection manager proxy handle it if implemented */
	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		/* Some other operation ran last; no auth status available */
		*status = 0;
	}

	return 0;
}
/**
 * usb4_switch_credits_init() - Read buffer allocation parameters
 * @sw: USB4 router
 *
 * Reads @sw buffer allocation parameters and initializes @sw buffer
 * allocation fields accordingly. Specifically @sw->credit_allocation
 * is set to %true if these parameters can be used in tunneling.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int usb4_switch_credits_init(struct tb_switch *sw)
{
	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
	int ret, length, i, nports;
	const struct tb_port *port;
	u32 data[USB4_DATA_DWORDS];
	u32 metadata = 0;
	u8 status = 0;

	memset(data, 0, sizeof(data));
	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
				  &status, NULL, 0, data, ARRAY_SIZE(data));
	if (ret)
		return ret;
	if (status)
		return -EIO;

	/* Number of valid entries is in the metadata */
	length = metadata & USB4_BA_LENGTH_MASK;
	if (WARN_ON(length > ARRAY_SIZE(data)))
		return -EMSGSIZE;

	/* -1 marks a parameter the router did not report */
	max_usb3 = -1;
	min_dp_aux = -1;
	min_dp_main = -1;
	max_pcie = -1;
	max_dma = -1;

	tb_sw_dbg(sw, "credit allocation parameters:\n");

	for (i = 0; i < length; i++) {
		u16 index, value;

		/* Each entry is an (index, value) pair packed in a dword */
		index = data[i] & USB4_BA_INDEX_MASK;
		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;

		switch (index) {
		case USB4_BA_MAX_USB3:
			tb_sw_dbg(sw, " USB3: %u\n", value);
			max_usb3 = value;
			break;
		case USB4_BA_MIN_DP_AUX:
			tb_sw_dbg(sw, " DP AUX: %u\n", value);
			min_dp_aux = value;
			break;
		case USB4_BA_MIN_DP_MAIN:
			tb_sw_dbg(sw, " DP main: %u\n", value);
			min_dp_main = value;
			break;
		case USB4_BA_MAX_PCIE:
			tb_sw_dbg(sw, " PCIe: %u\n", value);
			max_pcie = value;
			break;
		case USB4_BA_MAX_HI:
			tb_sw_dbg(sw, " DMA: %u\n", value);
			max_dma = value;
			break;
		default:
			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
				  index);
			break;
		}
	}

	/*
	 * Validate the buffer allocation preferences. If we find
	 * issues, log a warning and fall back using the hard-coded
	 * values.
	 */

	/* Host router must report baMaxHI */
	if (!tb_route(sw) && max_dma < 0) {
		tb_sw_warn(sw, "host router is missing baMaxHI\n");
		goto err_invalid;
	}

	nports = 0;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_null(port))
			nports++;
	}

	/* Must have DP buffer allocation (multiple USB4 ports) */
	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
		goto err_invalid;
	}

	/* Each adapter type present requires its matching parameter */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpout(port) && min_dp_main < 0) {
			tb_sw_warn(sw, "missing baMinDPmain");
			goto err_invalid;
		}
		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
		    min_dp_aux < 0) {
			tb_sw_warn(sw, "missing baMinDPaux");
			goto err_invalid;
		}
		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
		    max_usb3 < 0) {
			tb_sw_warn(sw, "missing baMaxUSB3");
			goto err_invalid;
		}
		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
		    max_pcie < 0) {
			tb_sw_warn(sw, "missing baMaxPCIe");
			goto err_invalid;
		}
	}

	/*
	 * Buffer allocation passed the validation so we can use it in
	 * path creation.
	 */
	sw->credit_allocation = true;
	if (max_usb3 > 0)
		sw->max_usb3_credits = max_usb3;
	if (min_dp_aux > 0)
		sw->min_dp_aux_credits = min_dp_aux;
	if (min_dp_main > 0)
		sw->min_dp_main_credits = min_dp_main;
	if (max_pcie > 0)
		sw->max_pcie_credits = max_pcie;
	if (max_dma > 0)
		sw->max_dma_credits = max_dma;

	return 0;

err_invalid:
	return -EINVAL;
}
868 */ 869 bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 870 { 871 u32 metadata = in->port; 872 u8 status; 873 int ret; 874 875 ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata, 876 &status); 877 /* 878 * If DP resource allocation is not supported assume it is 879 * always available. 880 */ 881 if (ret == -EOPNOTSUPP) 882 return true; 883 if (ret) 884 return false; 885 886 return !status; 887 } 888 889 /** 890 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource 891 * @sw: USB4 router 892 * @in: DP IN adapter 893 * 894 * Allocates DP IN resource for DP tunneling using USB4 router 895 * operations. If the resource was allocated returns %0. Otherwise 896 * returns negative errno, in particular %-EBUSY if the resource is 897 * already allocated. 898 */ 899 int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 900 { 901 u32 metadata = in->port; 902 u8 status; 903 int ret; 904 905 ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata, 906 &status); 907 if (ret == -EOPNOTSUPP) 908 return 0; 909 if (ret) 910 return ret; 911 912 return status ? -EBUSY : 0; 913 } 914 915 /** 916 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource 917 * @sw: USB4 router 918 * @in: DP IN adapter 919 * 920 * Releases the previously allocated DP IN resource. 921 */ 922 int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 923 { 924 u32 metadata = in->port; 925 u8 status; 926 int ret; 927 928 ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata, 929 &status); 930 if (ret == -EOPNOTSUPP) 931 return 0; 932 if (ret) 933 return ret; 934 935 return status ? -EIO : 0; 936 } 937 938 /** 939 * usb4_port_index() - Finds matching USB4 port index 940 * @sw: USB4 router 941 * @port: USB4 protocol or lane adapter 942 * 943 * Finds matching USB4 port index (starting from %0) that given @port goes 944 * through. 
945 */ 946 int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port) 947 { 948 struct tb_port *p; 949 int usb4_idx = 0; 950 951 /* Assume port is primary */ 952 tb_switch_for_each_port(sw, p) { 953 if (!tb_port_is_null(p)) 954 continue; 955 if (tb_is_upstream_port(p)) 956 continue; 957 if (!p->link_nr) { 958 if (p == port) 959 break; 960 usb4_idx++; 961 } 962 } 963 964 return usb4_idx; 965 } 966 967 /** 968 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter 969 * @sw: USB4 router 970 * @port: USB4 port 971 * 972 * USB4 routers have direct mapping between USB4 ports and PCIe 973 * downstream adapters where the PCIe topology is extended. This 974 * function returns the corresponding downstream PCIe adapter or %NULL 975 * if no such mapping was possible. 976 */ 977 struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, 978 const struct tb_port *port) 979 { 980 int usb4_idx = usb4_port_index(sw, port); 981 struct tb_port *p; 982 int pcie_idx = 0; 983 984 /* Find PCIe down port matching usb4_port */ 985 tb_switch_for_each_port(sw, p) { 986 if (!tb_port_is_pcie_down(p)) 987 continue; 988 989 if (pcie_idx == usb4_idx) 990 return p; 991 992 pcie_idx++; 993 } 994 995 return NULL; 996 } 997 998 /** 999 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter 1000 * @sw: USB4 router 1001 * @port: USB4 port 1002 * 1003 * USB4 routers have direct mapping between USB4 ports and USB 3.x 1004 * downstream adapters where the USB 3.x topology is extended. This 1005 * function returns the corresponding downstream USB 3.x adapter or 1006 * %NULL if no such mapping was possible. 
/**
 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 * downstream adapters where the USB 3.x topology is extended. This
 * function returns the corresponding downstream USB 3.x adapter or
 * %NULL if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_index(sw, port);
	struct tb_port *p;
	int usb_idx = 0;

	/* Find USB3 down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_usb3_down(p))
			continue;

		if (usb_idx == usb4_idx)
			return p;

		usb_idx++;
	}

	return NULL;
}

/**
 * usb4_switch_add_ports() - Add USB4 ports for this router
 * @sw: USB4 router
 *
 * For USB4 router finds all USB4 ports and registers devices for each.
 * Can be called to any router.
 *
 * Return %0 in case of success and negative errno in case of failure.
 */
int usb4_switch_add_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	/* Only native USB4 routers get port devices */
	if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		struct usb4_port *usb4;

		if (!tb_port_is_null(port))
			continue;
		if (!port->cap_usb4)
			continue;

		usb4 = usb4_port_device_add(port);
		if (IS_ERR(usb4)) {
			/* Roll back the ports added so far */
			usb4_switch_remove_ports(sw);
			return PTR_ERR(usb4);
		}

		port->usb4 = usb4;
	}

	return 0;
}

/**
 * usb4_switch_remove_ports() - Removes USB4 ports from this router
 * @sw: USB4 router
 *
 * Unregisters previously registered USB4 ports.
 */
void usb4_switch_remove_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->usb4) {
			usb4_port_device_remove(port->usb4);
			port->usb4 = NULL;
		}
	}
}
/**
 * usb4_port_unlock() - Unlock USB4 downstream port
 * @port: USB4 port to unlock
 *
 * Unlocks USB4 downstream port so that the connection manager can
 * access the router below this port.
 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	/* Clearing the lock bit grants CM access downstream */
	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * usb4_port_hotplug_enable() - Enables hotplug for a port
 * @port: USB4 port to operate on
 *
 * Enables hot plug events on a given port. This is only intended
 * to be used on lane, DP-IN, and DP-OUT adapters.
 */
int usb4_port_hotplug_enable(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	/* DHP is "disable hot plug" so clearing it enables the events */
	val &= ~ADP_CS_5_DHP;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}

/**
 * usb4_port_reset() - Issue downstream port reset
 * @port: USB4 port to reset
 *
 * Issues downstream port reset to @port.
 */
int usb4_port_reset(struct tb_port *port)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* Assert the downstream port reset bit ... */
	val |= PORT_CS_19_DPR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* ... hold it for 10 ms ... */
	fsleep(10000);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* ... and deassert it again */
	val &= ~PORT_CS_19_DPR;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/* Sets or clears the "port configured" (PC) bit in PORT_CS_19 */
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PC;
	else
		val &= ~PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 router
 *
 * Sets the USB4 link to be configured for power management purposes.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}

/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 router
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}

/* Sets or clears the "inter-domain configured" (PID) bit in PORT_CS_19 */
static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PID;
	else
		val &= ~PORT_CS_19_PID;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
 */
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	xd->link_usb4 = link_is_usb4(port);
	return usb4_set_xdomain_configured(port, true);
}

/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}

/*
 * Polls the given config space register until (@val & @bit) == @value
 * or @timeout_msec expires, sleeping @delay_usec between reads.
 */
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec, unsigned long delay_usec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		fsleep(delay_usec);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/* Reads up to %USB4_DATA_DWORDS dwords of sideband data from PORT_CS_2 */
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}

/* Writes up to %USB4_DATA_DWORDS dwords of sideband data to PORT_CS_2 */
static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}

/**
 * usb4_port_sb_read() - Read from sideband register
 * @port: USB4 port to read
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @reg: Sideband register index
 * @buf: Buffer where the sideband data is copied
 * @size: Size of @buf
 *
 * Reads data from sideband register @reg and copies it into @buf.
 * Returns %0 in case of success and negative errno in case of failure.
 */
int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
		      u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait until the pending (PND) bit clears */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* NR = no response from target, RC = result code set (error) */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}

/**
 * usb4_port_sb_write() - Write to sideband register
 * @port: USB4 port to write
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @reg: Sideband register index
 * @buf: Data to write
 * @size: Size of @buf
 *
 * Writes @buf to sideband register @reg. Returns %0 in case of success
 * and negative errno in case of failure.
 */
int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
		       u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait until the pending (PND) bit clears */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* NR = no response from target, RC = result code set (error) */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}

/* Maps a sideband opcode completion code to a negative errno */
static int usb4_port_sb_opcode_err_to_errno(u32 val)
{
	switch (val) {
	case 0:
		return 0;
	case USB4_SB_OPCODE_ERR:
		return -EAGAIN;
	case USB4_SB_OPCODE_ONS:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}

/*
 * Runs a sideband operation: writes @opcode to the sideband opcode
 * register and polls until the register value changes away from
 * @opcode (the target posted its result) or @timeout_msec expires.
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		if (val != opcode)
			return usb4_port_sb_opcode_err_to_errno(val);

		fsleep(USB4_PORT_SB_DELAY);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/* Writes the offline flag metadata followed by the ROUTER_OFFLINE opcode */
static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
	u32 val = !offline;
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_router_offline() - Put the USB4 port to offline mode
 * @port: USB4 port
 *
 * This function puts the USB4 port into offline mode. In this mode the
 * port does not react on hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 link is not up.
 *
 * Returns %0 in case of success and negative errno if there was an
 * error.
 */
int usb4_port_router_offline(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, true);
}

/**
 * usb4_port_router_online() - Put the USB4 port back to online
 * @port: USB4 port
 *
 * Makes the USB4 port functional again.
 */
int usb4_port_router_online(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, false);
}

/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send broadcast RT transaction which
 * makes the retimers on the link to assign index to themselves. Returns
 * %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_clx_supported() - Check if CLx is supported by the link
 * @port: Port to check for CLx support for
 *
 * PORT_CS_18_CPS bit reflects if the link supports CLx including
 * active cables (if connected on the link).
 */
bool usb4_port_clx_supported(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_CPS);
}

/**
 * usb4_port_asym_supported() - If the port supports asymmetric link
 * @port: USB4 port
 *
 * Checks if the port and the cable supports asymmetric link and returns
 * %true in that case.
 */
bool usb4_port_asym_supported(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !!(val & PORT_CS_18_CSA);
}

/**
 * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
 * @port: USB4 port
 * @width: Asymmetric width to configure
 *
 * Sets USB4 port link width to @width. Can be called for widths where
 * usb4_port_asym_width_supported() returned @true.
 */
int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
	switch (width) {
	case TB_LINK_WIDTH_DUAL:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * usb4_port_asym_start() - Start symmetry change and wait for completion
 * @port: USB4 port
 *
 * Start symmetry change of the link to asymmetric or symmetric
 * (according to what was previously set in tb_port_set_link_width()).
 * Wait for completion of the change.
 *
 * Returns %0 in case of success, %-ETIMEDOUT in case of timeout or
 * a negative errno in case of a failure.
 */
int usb4_port_asym_start(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	val &= ~PORT_CS_19_START_ASYM;
	val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/*
	 * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
	 * port started the symmetry transition.
	 */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
				     PORT_CS_19_START_ASYM, 0, 1000,
				     USB4_PORT_DELAY);
	if (ret)
		return ret;

	/* Then wait for the transition to be completed */
	return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
				      PORT_CS_18_TIP, 0, 5000, USB4_PORT_DELAY);
}

/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @caps: Array with at least two elements to hold the results
 * @ncaps: Number of elements in the caps array
 *
 * Reads the USB4 port lane margining capabilities into @caps.
 */
int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u32 *caps, size_t ncaps)
{
	int ret;

	ret = usb4_port_sb_op(port, target, index,
			      USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, target, index, USB4_SB_DATA, caps,
				 sizeof(*caps) * ncaps);
}

/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @params: Parameters for USB4 hardware margining
 * @results: Array to hold the results
 * @nresults: Number of elements in the results array
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 */
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
			u8 index, const struct usb4_port_margining_params *params,
			u32 *results, size_t nresults)
{
	u32 val;
	int ret;

	if (WARN_ON_ONCE(!params))
		return -EINVAL;

	/* Encode the margining parameters into the metadata dword */
	val = params->lanes;
	if (params->time)
		val |= USB4_MARGIN_HW_TIME;
	if (params->right_high || params->upper_eye)
		val |= USB4_MARGIN_HW_RHU;
	if (params->ber_level)
		val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level);
	if (params->optional_voltage_offset_range)
		val |= USB4_MARGIN_HW_OPT_VOLTAGE;

	ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
				 sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_op(port, target, index,
			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
				 sizeof(*results) * nresults);
}

/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @params: Parameters for USB4 software margining
 * @results: Data word for the operation completion data
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
			u8 index, const struct usb4_port_margining_params *params,
			u32 *results)
{
	u32 val;
	int ret;

	if (WARN_ON_ONCE(!params))
		return -EINVAL;

	/* Encode the margining parameters into the metadata dword */
	val = params->lanes;
	if (params->time)
		val |= USB4_MARGIN_SW_TIME;
	if (params->optional_voltage_offset_range)
		val |= USB4_MARGIN_SW_OPT_VOLTAGE;
	if (params->right_high)
		val |= USB4_MARGIN_SW_RH;
	if (params->upper_eye)
		val |= USB4_MARGIN_SW_UPPER_EYE;
	val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter);
	val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset);

	ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
				 sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_op(port, target, index,
			      USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
				 sizeof(*results));
}

/**
 * usb4_port_sw_margin_errors() - Read the software margining error counters
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @errors: Error metadata is copied here.
 *
 * This reads back the software margining error counters from the port.
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
			       u8 index, u32 *errors)
{
	int ret;

	ret = usb4_port_sb_op(port, target, index,
			      USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, target, index, USB4_SB_METADATA, errors,
				 sizeof(*errors));
}

/* Convenience wrapper for sideband operations targeting a retimer */
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}

/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when USB4
 * link does not go up, for example if there is no device connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);

	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}

/**
 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Disables sideband channel transactions on SBTX. The reverse of
 * usb4_port_retimer_set_inbound_sbtx().
 */
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}

/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is last one (connected directly to the
 * Type-C port) this function returns %1. If it is not returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	/* The answer is in bit 0 of the metadata register */
	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_METADATA, &metadata, sizeof(metadata));
	return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_is_cable() - Is the retimer cable retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is last cable retimer this function returns
 * %1 and %0 if it is on-board retimer. In case a retimer is not present
 * at @index returns %-ENODEV. Otherwise returns negative errno.
 */
int usb4_port_retimer_is_cable(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_CABLE_RETIMER,
				   500);
	if (ret)
		return ret;

	/* The answer is in bit 0 of the metadata register */
	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_METADATA, &metadata, sizeof(metadata));
	return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_METADATA, &metadata, sizeof(metadata));
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	/* The offset is communicated as a dword address in the metadata */
	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_METADATA, &metadata, sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}

/* Carries the target port and retimer index to the NVM block callbacks */
struct retimer_info {
	struct tb_port *port;
	u8 index;
};

/* tb_nvm_write_data() callback: writes one block of NVM data to a retimer */
static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)

{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_DATA, buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}

/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}

/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_OPCODE, &val, sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_opcode_err_to_errno(val);
	switch (ret) {
	case 0:
		*status = 0;
		return 0;

	case -EAGAIN:
		/* The operation failed; the result code is in the metadata */
		ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
					USB4_SB_METADATA, &metadata,
					sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	default:
		return ret;
	}
}

/* tb_nvm_read_data() callback: reads one block of NVM data from a retimer */
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	/* The length field is only set for partial (< USB4_DATA_DWORDS) reads */
	if (dwords < USB4_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_METADATA, &metadata, sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_DATA, buf, dwords * 4);
}

/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}

/* Clamps @bw to the per-port bandwidth limitation, if one is set */
static inline unsigned int
usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
{
	/* Take the possible bandwidth limitation into account */
	if (port->max_bw)
		return min(bw, port->max_bw);
	return bw;
}

/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;

	return usb4_usb3_port_max_bandwidth(port, ret);
}

/*
 * Sets or clears the CM request (CMR) bit and waits until the adapter
 * acknowledges it through the HCA bit. Only valid for host router
 * downstream USB3 adapters (tb_route() == 0).
 */
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500,
				      USB4_PORT_DELAY);
}

static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}

static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}

/* Converts a scaled USB3 bandwidth register value to Mb/s */
static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
	unsigned long uframes;

	uframes = bw * 512UL << scale;
	return DIV_ROUND_CLOSEST(uframes * 8000, MEGA);
}

/* Converts Mb/s to a scaled USB3 bandwidth register value */
static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
	unsigned long uframes;

	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
	uframes = ((unsigned long)mbps * MEGA) / 8000;
	return DIV_ROUND_UP(uframes, 512UL << scale);
}

/* Reads the currently allocated up/downstream USB3 bandwidth in Mb/s */
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}

/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}

/* Reads the currently consumed up/downstream USB3 bandwidth in Mb/s */
static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
/* Programs the allocated bandwidth registers, picking a suitable scale */
static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret, max_bw;

	/* Figure out suitable scale */
	scale = 0;
	max_bw = max(upstream_bw, downstream_bw);
	while (scale < 64) {
		if (mbps_to_usb3_bw(max_bw, scale) < 4096)
			break;
		scale++;
	}

	if (WARN_ON(scale >= 64))
		return -EINVAL;

	ret = tb_port_write(port, &scale, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}

/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	/* Report back the values that were actually programmed */
	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}

/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 900 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 900)
		consumed_up = 900;
	if (consumed_down < 900)
		consumed_down = 900;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}

/* Returns true only for DP IN adapters that sit on a USB4 router */
static bool is_usb4_dpin(const struct tb_port *port)
{
	if (!tb_port_is_dpin(port))
		return false;
	if (!tb_switch_is_usb4(port->sw))
		return false;
	return true;
}

/**
 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
 * @port: DP IN adapter
 * @cm_id: CM ID to assign
 *
 * Sets CM ID for the @port. Returns %0 on success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
 * support this.
 */
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_CM_ID_MASK;
	val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
 *					     supported
 * @port: DP IN adapter to check
 *
 * Can be called to any DP IN adapter. Returns true if the adapter
 * supports USB4 bandwidth allocation mode, false otherwise.
2461 */ 2462 bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port) 2463 { 2464 int ret; 2465 u32 val; 2466 2467 if (!is_usb4_dpin(port)) 2468 return false; 2469 2470 ret = tb_port_read(port, &val, TB_CFG_PORT, 2471 port->cap_adap + DP_LOCAL_CAP, 1); 2472 if (ret) 2473 return false; 2474 2475 return !!(val & DP_COMMON_CAP_BW_MODE); 2476 } 2477 2478 /** 2479 * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode 2480 * enabled 2481 * @port: DP IN adapter to check 2482 * 2483 * Can be called to any DP IN adapter. Returns true if the bandwidth 2484 * allocation mode has been enabled, false otherwise. 2485 */ 2486 bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port) 2487 { 2488 int ret; 2489 u32 val; 2490 2491 if (!is_usb4_dpin(port)) 2492 return false; 2493 2494 ret = tb_port_read(port, &val, TB_CFG_PORT, 2495 port->cap_adap + ADP_DP_CS_8, 1); 2496 if (ret) 2497 return false; 2498 2499 return !!(val & ADP_DP_CS_8_DPME); 2500 } 2501 2502 /** 2503 * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for 2504 * bandwidth allocation mode 2505 * @port: DP IN adapter 2506 * @supported: Does the CM support bandwidth allocation mode 2507 * 2508 * Can be called to any DP IN adapter. Sets or clears the CM support bit 2509 * of the DP IN adapter. Returns %0 in success and negative errno 2510 * otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter 2511 * does not support this. 
2512 */ 2513 int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port, 2514 bool supported) 2515 { 2516 u32 val; 2517 int ret; 2518 2519 if (!is_usb4_dpin(port)) 2520 return -EOPNOTSUPP; 2521 2522 ret = tb_port_read(port, &val, TB_CFG_PORT, 2523 port->cap_adap + ADP_DP_CS_2, 1); 2524 if (ret) 2525 return ret; 2526 2527 if (supported) 2528 val |= ADP_DP_CS_2_CMMS; 2529 else 2530 val &= ~ADP_DP_CS_2_CMMS; 2531 2532 return tb_port_write(port, &val, TB_CFG_PORT, 2533 port->cap_adap + ADP_DP_CS_2, 1); 2534 } 2535 2536 /** 2537 * usb4_dp_port_group_id() - Return Group ID assigned for the adapter 2538 * @port: DP IN adapter 2539 * 2540 * Reads bandwidth allocation Group ID from the DP IN adapter and 2541 * returns it. If the adapter does not support setting Group_ID 2542 * %-EOPNOTSUPP is returned. 2543 */ 2544 int usb4_dp_port_group_id(struct tb_port *port) 2545 { 2546 u32 val; 2547 int ret; 2548 2549 if (!is_usb4_dpin(port)) 2550 return -EOPNOTSUPP; 2551 2552 ret = tb_port_read(port, &val, TB_CFG_PORT, 2553 port->cap_adap + ADP_DP_CS_2, 1); 2554 if (ret) 2555 return ret; 2556 2557 return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT; 2558 } 2559 2560 /** 2561 * usb4_dp_port_set_group_id() - Set adapter Group ID 2562 * @port: DP IN adapter 2563 * @group_id: Group ID for the adapter 2564 * 2565 * Sets bandwidth allocation mode Group ID for the DP IN adapter. 2566 * Returns %0 in case of success and negative errno otherwise. 2567 * Specifically returns %-EOPNOTSUPP if the adapter does not support 2568 * this. 
 */
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
	val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_nrd() - Read non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s is placed here
 * @lanes: Non-reduced lanes are placed here
 *
 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
 * %0 in success and negative errno otherwise. Specifically returns
 * %-EOPNOTSUPP if the adapter does not support this.
 */
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
	u32 val, tmp;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * Decode the NRD rate field into Mb/s. NOTE(review): on an
	 * encoding not listed below *rate (and *lanes, likewise) is
	 * left untouched — callers should pre-initialize the outputs.
	 */
	tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
	switch (tmp) {
	case DP_COMMON_CAP_RATE_RBR:
		*rate = 1620;
		break;
	case DP_COMMON_CAP_RATE_HBR:
		*rate = 2700;
		break;
	case DP_COMMON_CAP_RATE_HBR2:
		*rate = 5400;
		break;
	case DP_COMMON_CAP_RATE_HBR3:
		*rate = 8100;
		break;
	}

	/* Decode the NRD lane count field */
	tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
	switch (tmp) {
	case DP_COMMON_CAP_1_LANE:
		*lanes = 1;
		break;
	case DP_COMMON_CAP_2_LANES:
		*lanes = 2;
		break;
	case DP_COMMON_CAP_4_LANES:
		*lanes = 4;
		break;
	}

	return 0;
}

/**
 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate:
Non-reduced rate in Mb/s
 * @lanes: Non-reduced lanes
 *
 * Before the capabilities reduction this function can be used to set
 * the non-reduced values for the DP IN adapter. Returns %0 in success
 * and negative errno otherwise. If the adapter does not support this
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_NRD_MLR_MASK;

	switch (rate) {
	case 1620:
		/* Nothing to OR in: 1620 presumably encodes as 0 (RBR) */
		break;
	case 2700:
		val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 5400:
		val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 8100:
		val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	default:
		return -EINVAL;
	}

	val &= ~ADP_DP_CS_2_NRD_MLC_MASK;

	switch (lanes) {
	case 1:
		/* Nothing to OR in: one lane presumably encodes as 0 */
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_granularity() - Return granularity for the bandwidth values
 * @port: DP IN adapter
 *
 * Reads the programmed granularity from @port. If the DP IN adapter does
 * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
 * errno in other error cases.
2716 */ 2717 int usb4_dp_port_granularity(struct tb_port *port) 2718 { 2719 u32 val; 2720 int ret; 2721 2722 if (!is_usb4_dpin(port)) 2723 return -EOPNOTSUPP; 2724 2725 ret = tb_port_read(port, &val, TB_CFG_PORT, 2726 port->cap_adap + ADP_DP_CS_2, 1); 2727 if (ret) 2728 return ret; 2729 2730 val &= ADP_DP_CS_2_GR_MASK; 2731 val >>= ADP_DP_CS_2_GR_SHIFT; 2732 2733 switch (val) { 2734 case ADP_DP_CS_2_GR_0_25G: 2735 return 250; 2736 case ADP_DP_CS_2_GR_0_5G: 2737 return 500; 2738 case ADP_DP_CS_2_GR_1G: 2739 return 1000; 2740 } 2741 2742 return -EINVAL; 2743 } 2744 2745 /** 2746 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values 2747 * @port: DP IN adapter 2748 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250. 2749 * 2750 * Sets the granularity used with the estimated, allocated and requested 2751 * bandwidth. Returns %0 in success and negative errno otherwise. If the 2752 * adapter does not support this %-EOPNOTSUPP is returned. 2753 */ 2754 int usb4_dp_port_set_granularity(struct tb_port *port, int granularity) 2755 { 2756 u32 val; 2757 int ret; 2758 2759 if (!is_usb4_dpin(port)) 2760 return -EOPNOTSUPP; 2761 2762 ret = tb_port_read(port, &val, TB_CFG_PORT, 2763 port->cap_adap + ADP_DP_CS_2, 1); 2764 if (ret) 2765 return ret; 2766 2767 val &= ~ADP_DP_CS_2_GR_MASK; 2768 2769 switch (granularity) { 2770 case 250: 2771 val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT; 2772 break; 2773 case 500: 2774 val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT; 2775 break; 2776 case 1000: 2777 val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT; 2778 break; 2779 default: 2780 return -EINVAL; 2781 } 2782 2783 return tb_port_write(port, &val, TB_CFG_PORT, 2784 port->cap_adap + ADP_DP_CS_2, 1); 2785 } 2786 2787 /** 2788 * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth 2789 * @port: DP IN adapter 2790 * @bw: Estimated bandwidth in Mb/s. 2791 * 2792 * Sets the estimated bandwidth to @bw. 
Set the granularity by calling
 * usb4_dp_port_set_granularity() before calling this. The @bw is round
 * down to the closest granularity multiplier. Returns %0 in success
 * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support this.
 */
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Integer division rounds @bw down to a granularity multiple */
	val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
	val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
 * @port: DP IN adapter
 *
 * Reads and returns allocated bandwidth for @port in Mb/s (taking into
 * account the programmed granularity). Returns negative errno in case
 * of error.
 */
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	val &= DP_STATUS_ALLOCATED_BW_MASK;
	val >>= DP_STATUS_ALLOCATED_BW_SHIFT;

	/* The field holds granularity units, convert back to Mb/s */
	return val * granularity;
}

/* Sets or clears the CM ack (CA) bit in ADP_DP_CS_2 */
static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
{
	u32 val;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	if (ack)
		val |= ADP_DP_CS_2_CA;
	else
		val &= ~ADP_DP_CS_2_CA;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
	return __usb4_dp_port_set_cm_ack(port, true);
}

/*
 * Clears the CM ack and then polls ADP_DP_CS_8 until the DPTX request
 * (DR) bit clears or @timeout_msec expires, after which the CA bit in
 * ADP_DP_CS_2 is cleared once more.
 */
static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
					      int timeout_msec)
{
	ktime_t end;
	u32 val;
	int ret;

	ret = __usb4_dp_port_set_cm_ack(port, false);
	if (ret)
		return ret;

	end = ktime_add_ms(ktime_get(), timeout_msec);
	do {
		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_adap + ADP_DP_CS_8, 1);
		if (ret)
			return ret;

		if (!(val & ADP_DP_CS_8_DR))
			break;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), end));

	if (val & ADP_DP_CS_8_DR) {
		tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
		return -ETIMEDOUT;
	}

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_CA;
	return tb_port_write(port,
&val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
 * @port: DP IN adapter
 * @bw: New allocated bandwidth in Mb/s
 *
 * Communicates the new allocated bandwidth with the DPCD (graphics
 * driver). Takes into account the programmed granularity. Returns %0 in
 * success and negative errno in case of error.
 */
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	/* Program the allocation in granularity units */
	val &= ~DP_STATUS_ALLOCATED_BW_MASK;
	val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	/* Ack the change and wait for the DPTX request bit to clear */
	ret = usb4_dp_port_set_cm_ack(port);
	if (ret)
		return ret;

	return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
}

/**
 * usb4_dp_port_requested_bandwidth() - Read requested bandwidth
 * @port: DP IN adapter
 *
 * Reads the DPCD (graphics driver) requested bandwidth and returns it
 * in Mb/s. Takes the programmed granularity into account. In case of
 * error returns negative errno. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support bandwidth allocation mode, and %-ENODATA
 * if there is no active bandwidth request from the graphics driver.
2969 */ 2970 int usb4_dp_port_requested_bandwidth(struct tb_port *port) 2971 { 2972 u32 val, granularity; 2973 int ret; 2974 2975 if (!is_usb4_dpin(port)) 2976 return -EOPNOTSUPP; 2977 2978 ret = usb4_dp_port_granularity(port); 2979 if (ret < 0) 2980 return ret; 2981 granularity = ret; 2982 2983 ret = tb_port_read(port, &val, TB_CFG_PORT, 2984 port->cap_adap + ADP_DP_CS_8, 1); 2985 if (ret) 2986 return ret; 2987 2988 if (!(val & ADP_DP_CS_8_DR)) 2989 return -ENODATA; 2990 2991 return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity; 2992 } 2993 2994 /** 2995 * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation 2996 * @port: PCIe adapter 2997 * @enable: Enable/disable extended encapsulation 2998 * 2999 * Enables or disables extended encapsulation used in PCIe tunneling. Caller 3000 * needs to make sure both adapters support this before enabling. Returns %0 on 3001 * success and negative errno otherwise. 3002 */ 3003 int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable) 3004 { 3005 u32 val; 3006 int ret; 3007 3008 if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port)) 3009 return -EINVAL; 3010 3011 ret = tb_port_read(port, &val, TB_CFG_PORT, 3012 port->cap_adap + ADP_PCIE_CS_1, 1); 3013 if (ret) 3014 return ret; 3015 3016 if (enable) 3017 val |= ADP_PCIE_CS_1_EE; 3018 else 3019 val &= ~ADP_PCIE_CS_1_EE; 3020 3021 return tb_port_write(port, &val, TB_CFG_PORT, 3022 port->cap_adap + ADP_PCIE_CS_1, 1); 3023 } 3024