Lines matching the full-text query +full:0 +full:xd (all hits are in the Thunderbolt ICM driver, drivers/thunderbolt/icm.c)
26 #define PCIE2CIO_CMD 0x30
35 #define PCIE2CIO_WRDATA 0x34
36 #define PCIE2CIO_RDDATA 0x38
38 #define PHY_PORT_CS1 0x37
61 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
120 #define EP_NAME_INTEL_VSS 0x10
132 #define INTEL_VSS_FLAGS_RTD3 BIT(0)
181 return link ? ((link - 1) ^ 0x01) + 1 : 0; in dual_link_from_link()
192 return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; in get_parent_route()
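The two helpers matched just above express the Thunderbolt link and route handling as plain integer arithmetic. Below is a minimal standalone sketch of that math, assuming TB_ROUTE_SHIFT is 8 (one port byte per hop in the route string); it is illustrative, not a copy of the kernel code.

#include <stdint.h>
#include <stdio.h>

/* Assumption: one byte of the route string per hop. */
#define TB_ROUTE_SHIFT	8

/* Links come in pairs (1,2), (3,4), ...; XORing the zero-based index flips
 * between the two lanes of a pair.  Link 0 means "no link" and has no dual. */
static uint8_t dual_link_from_link(uint8_t link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

/* Drop the topmost hop byte from a route string to get the route of the
 * parent switch.  Depth 0 is the host router, whose route is 0. */
static uint64_t get_parent_route(uint64_t route, uint8_t depth)
{
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

int main(void)
{
	printf("dual of link 1 is %u, dual of link 3 is %u\n",
	       dual_link_from_link(1), dual_link_from_link(3));
	printf("parent of route 0x0302 at depth 2 is 0x%llx\n",
	       (unsigned long long)get_parent_route(0x0302, 2));
	return 0;
}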
206 return 0; in pci2cio_wait_completion()
233 return 0; in pcie2cio_read()
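The PCIE2CIO_* offsets matched at the top of the listing, together with the pci2cio_wait_completion()/pcie2cio_read() hits here, suggest a small command/data mailbox. The snippet below only records the offsets exactly as they appear in the listing; the comments about the read flow and where the registers live are assumptions drawn from the names, not from the source.

/* Offsets copied from the matched #defines.  PHY_PORT_CS1 is a CIO port
 * config register; the PCIE2CIO_* registers are assumed to sit in the host
 * controller's PCI vendor-specific capability. */
enum pcie2cio_regs {
	PCIE2CIO_CMD	= 0x30,	/* command/doorbell (assumed) */
	PCIE2CIO_WRDATA	= 0x34,	/* data to be written to the CIO register */
	PCIE2CIO_RDDATA	= 0x38,	/* data read back from the CIO register */
};

#define PHY_PORT_CS1	0x37

/*
 * Assumed read flow, inferred only from the names above:
 *   1. program PCIE2CIO_CMD with the target port/config space/offset,
 *   2. poll for completion (pci2cio_wait_completion() in the listing),
 *   3. fetch the result from PCIE2CIO_RDDATA.
 * A write would place the value in PCIE2CIO_WRDATA first.
 */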
376 return 0; in icm_fr_get_switch_index()
379 return index != 0xff ? index : 0; in icm_fr_get_switch_index()
401 sw = &switches[0]; in icm_fr_get_route()
417 for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { in icm_fr_get_route()
435 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); in icm_fr_save_devices()
448 memset(&reply, 0, sizeof(reply)); in icm_fr_driver_ready()
457 return 0; in icm_fr_driver_ready()
466 memset(&request, 0, sizeof(request)); in icm_fr_approve_switch()
472 memset(&reply, 0, sizeof(reply)); in icm_fr_approve_switch()
484 return 0; in icm_fr_approve_switch()
493 memset(&request, 0, sizeof(request)); in icm_fr_add_switch_key()
500 memset(&reply, 0, sizeof(reply)); in icm_fr_add_switch_key()
511 return 0; in icm_fr_add_switch_key()
521 memset(&request, 0, sizeof(request)); in icm_fr_challenge_switch_key()
528 memset(&reply, 0, sizeof(reply)); in icm_fr_challenge_switch_key()
541 return 0; in icm_fr_challenge_switch_key()
544 static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in icm_fr_approve_xdomain_paths() argument
550 memset(&request, 0, sizeof(request)); in icm_fr_approve_xdomain_paths()
552 request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; in icm_fr_approve_xdomain_paths()
553 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); in icm_fr_approve_xdomain_paths()
555 request.transmit_path = xd->transmit_path; in icm_fr_approve_xdomain_paths()
556 request.transmit_ring = xd->transmit_ring; in icm_fr_approve_xdomain_paths()
557 request.receive_path = xd->receive_path; in icm_fr_approve_xdomain_paths()
558 request.receive_ring = xd->receive_ring; in icm_fr_approve_xdomain_paths()
560 memset(&reply, 0, sizeof(reply)); in icm_fr_approve_xdomain_paths()
569 return 0; in icm_fr_approve_xdomain_paths()
572 static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in icm_fr_disconnect_xdomain_paths() argument
577 phy_port = tb_phy_port_from_link(xd->link); in icm_fr_disconnect_xdomain_paths()
578 if (phy_port == 0) in icm_fr_disconnect_xdomain_paths()
586 return 0; in icm_fr_disconnect_xdomain_paths()
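icm_fr_disconnect_xdomain_paths() above chooses a mailbox command based on which physical port the link belongs to (phy_port == 0 selects one command, anything else the other). The sketch below illustrates the link-to-physical-port pairing implied by the ((link - 1) ^ 0x01) + 1 dual-link relation seen earlier; the divide-by-two mapping is an assumption consistent with that pairing, not the driver's tb_phy_port_from_link() itself.

#include <stdio.h>
#include <stdint.h>

/* Assumption: two links per physical port, paired as (1,2) -> port 0 and
 * (3,4) -> port 1. */
static uint8_t phy_port_from_link_guess(uint8_t link)
{
	return (link - 1) / 2;
}

int main(void)
{
	for (uint8_t link = 1; link <= 4; link++)
		printf("link %u -> phy port %u\n", link,
		       phy_port_from_link_guess(link));
	return 0;
}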
665 struct tb_xdomain *xd; in add_xdomain() local
669 xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); in add_xdomain()
670 if (!xd) in add_xdomain()
673 xd->link = link; in add_xdomain()
674 xd->depth = depth; in add_xdomain()
676 tb_port_at(route, sw)->xdomain = xd; in add_xdomain()
678 tb_xdomain_add(xd); in add_xdomain()
685 static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link) in update_xdomain() argument
687 xd->link = link; in update_xdomain()
688 xd->route = route; in update_xdomain()
689 xd->is_unplugged = false; in update_xdomain()
692 static void remove_xdomain(struct tb_xdomain *xd) in remove_xdomain() argument
696 sw = tb_to_switch(xd->dev.parent); in remove_xdomain()
697 tb_port_at(xd->route, sw)->xdomain = NULL; in remove_xdomain()
698 tb_xdomain_remove(xd); in remove_xdomain()
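The add_xdomain()/update_xdomain()/remove_xdomain() hits form a small lifecycle for XDomain (host-to-host) connections. Stitching just the matched lines back together gives roughly the shape below; the add_xdomain() signature is inferred from its call site later in the listing, and the error handling, locking and runtime-PM details present in the real driver are omitted, so this is a reconstruction rather than a compilable unit.

/* Reconstruction from the matched lines only; not a complete copy. */
static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
			u8 link, u8 depth)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		return;

	xd->link = link;
	xd->depth = depth;

	/* The port at the end of the route now has an XDomain behind it. */
	tb_port_at(route, sw)->xdomain = xd;

	tb_xdomain_add(xd);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
	xd->link = link;
	xd->route = route;
	xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
}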
711 struct tb_xdomain *xd; in icm_fr_device_connected() local
803 xd = tb_xdomain_find_by_link_depth(tb, link, depth); in icm_fr_device_connected()
804 if (xd) { in icm_fr_device_connected()
805 remove_xdomain(xd); in icm_fr_device_connected()
806 tb_xdomain_put(xd); in icm_fr_device_connected()
882 struct tb_xdomain *xd; in icm_fr_xdomain_connected() local
898 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); in icm_fr_xdomain_connected()
899 if (xd) { in icm_fr_xdomain_connected()
902 xd_phy_port = phy_port_from_route(xd->route, xd->depth); in icm_fr_xdomain_connected()
905 if (xd->depth == depth && xd_phy_port == phy_port) { in icm_fr_xdomain_connected()
906 update_xdomain(xd, route, link); in icm_fr_xdomain_connected()
907 tb_xdomain_put(xd); in icm_fr_xdomain_connected()
917 remove_xdomain(xd); in icm_fr_xdomain_connected()
918 tb_xdomain_put(xd); in icm_fr_xdomain_connected()
926 xd = tb_xdomain_find_by_link_depth(tb, link, depth); in icm_fr_xdomain_connected()
927 if (!xd) { in icm_fr_xdomain_connected()
932 xd = tb_xdomain_find_by_link_depth(tb, dual_link, in icm_fr_xdomain_connected()
935 if (xd) { in icm_fr_xdomain_connected()
936 remove_xdomain(xd); in icm_fr_xdomain_connected()
937 tb_xdomain_put(xd); in icm_fr_xdomain_connected()
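Read in order, the icm_fr_xdomain_connected() hits suggest this reconciliation flow when a new XDomain announcement arrives: first look up the remote UUID and either refresh the existing record (same depth and physical port) or drop the stale one, then clear whatever currently occupies the same link/depth position, including its dual link, before adding the new connection. The sketch below stitches the matched lines with assumed control flow; the function name and parameter list are hypothetical, and the extraction of route/link/depth from the event packet is not shown.

/* Assumed control flow around the matched lines; not a verbatim copy. */
static void handle_xdomain_connected(struct tb *tb, u64 route, u8 link,
				     u8 depth, const uuid_t *local_uuid,
				     const uuid_t *remote_uuid,
				     struct tb_switch *parent_sw)
{
	struct tb_xdomain *xd;

	/* Already know this remote host?  Refresh it if it is still in the
	 * same physical position, otherwise drop the stale record. */
	xd = tb_xdomain_find_by_uuid(tb, remote_uuid);
	if (xd) {
		u8 xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		u8 phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);
			tb_xdomain_put(xd);
			return;
		}

		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/* Something else may already sit at this link/depth, possibly on the
	 * dual link of the same physical port; remove it first. */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (!xd) {
		u8 dual_link = dual_link_from_link(link);

		if (dual_link)
			xd = tb_xdomain_find_by_link_depth(tb, dual_link, depth);
	}
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	add_xdomain(parent_sw, route, local_uuid, remote_uuid, link, depth);
}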
968 struct tb_xdomain *xd; in icm_fr_xdomain_disconnected() local
975 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); in icm_fr_xdomain_disconnected()
976 if (xd) { in icm_fr_xdomain_disconnected()
977 remove_xdomain(xd); in icm_fr_xdomain_disconnected()
978 tb_xdomain_put(xd); in icm_fr_xdomain_disconnected()
984 return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1)); in icm_tr_cio_reset()
997 memset(&reply, 0, sizeof(reply)); in icm_tr_driver_ready()
1011 return 0; in icm_tr_driver_ready()
1020 memset(&request, 0, sizeof(request)); in icm_tr_approve_switch()
1027 memset(&reply, 0, sizeof(reply)); in icm_tr_approve_switch()
1038 return 0; in icm_tr_approve_switch()
1047 memset(&request, 0, sizeof(request)); in icm_tr_add_switch_key()
1055 memset(&reply, 0, sizeof(reply)); in icm_tr_add_switch_key()
1066 return 0; in icm_tr_add_switch_key()
1076 memset(&request, 0, sizeof(request)); in icm_tr_challenge_switch_key()
1084 memset(&reply, 0, sizeof(reply)); in icm_tr_challenge_switch_key()
1097 return 0; in icm_tr_challenge_switch_key()
1100 static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in icm_tr_approve_xdomain_paths() argument
1106 memset(&request, 0, sizeof(request)); in icm_tr_approve_xdomain_paths()
1108 request.route_hi = upper_32_bits(xd->route); in icm_tr_approve_xdomain_paths()
1109 request.route_lo = lower_32_bits(xd->route); in icm_tr_approve_xdomain_paths()
1110 request.transmit_path = xd->transmit_path; in icm_tr_approve_xdomain_paths()
1111 request.transmit_ring = xd->transmit_ring; in icm_tr_approve_xdomain_paths()
1112 request.receive_path = xd->receive_path; in icm_tr_approve_xdomain_paths()
1113 request.receive_ring = xd->receive_ring; in icm_tr_approve_xdomain_paths()
1114 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); in icm_tr_approve_xdomain_paths()
1116 memset(&reply, 0, sizeof(reply)); in icm_tr_approve_xdomain_paths()
1125 return 0; in icm_tr_approve_xdomain_paths()
1128 static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, in icm_tr_xdomain_tear_down() argument
1135 memset(&request, 0, sizeof(request)); in icm_tr_xdomain_tear_down()
1138 request.route_hi = upper_32_bits(xd->route); in icm_tr_xdomain_tear_down()
1139 request.route_lo = lower_32_bits(xd->route); in icm_tr_xdomain_tear_down()
1140 memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); in icm_tr_xdomain_tear_down()
1142 memset(&reply, 0, sizeof(reply)); in icm_tr_xdomain_tear_down()
1151 return 0; in icm_tr_xdomain_tear_down()
1154 static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) in icm_tr_disconnect_xdomain_paths() argument
1158 ret = icm_tr_xdomain_tear_down(tb, xd, 1); in icm_tr_disconnect_xdomain_paths()
1163 return icm_tr_xdomain_tear_down(tb, xd, 2); in icm_tr_disconnect_xdomain_paths()
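On the TR side the listing shows the XDomain disconnect being issued as two tear-down stages (1, then 2) against the same route and remote UUID. A minimal reconstruction of that wrapper, assuming the second stage simply follows a successful first stage, looks like this; any delay or extra handling between the stages in the real driver is not shown.

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	int ret;

	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
	if (ret)
		return ret;

	return icm_tr_xdomain_tear_down(tb, xd, 2);
}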
1175 struct tb_xdomain *xd; in __icm_tr_device_connected() local
1208 0, 0, 0, boot); in __icm_tr_device_connected()
1225 xd = tb_xdomain_find_by_route(tb, route); in __icm_tr_device_connected()
1226 if (xd) { in __icm_tr_device_connected()
1227 remove_xdomain(xd); in __icm_tr_device_connected()
1228 tb_xdomain_put(xd); in __icm_tr_device_connected()
1293 struct tb_xdomain *xd; in icm_tr_xdomain_connected() local
1302 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); in icm_tr_xdomain_connected()
1303 if (xd) { in icm_tr_xdomain_connected()
1304 if (xd->route == route) { in icm_tr_xdomain_connected()
1305 update_xdomain(xd, route, 0); in icm_tr_xdomain_connected()
1306 tb_xdomain_put(xd); in icm_tr_xdomain_connected()
1310 remove_xdomain(xd); in icm_tr_xdomain_connected()
1311 tb_xdomain_put(xd); in icm_tr_xdomain_connected()
1315 xd = tb_xdomain_find_by_route(tb, route); in icm_tr_xdomain_connected()
1316 if (xd) { in icm_tr_xdomain_connected()
1317 remove_xdomain(xd); in icm_tr_xdomain_connected()
1318 tb_xdomain_put(xd); in icm_tr_xdomain_connected()
1338 add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); in icm_tr_xdomain_connected()
1347 struct tb_xdomain *xd; in icm_tr_xdomain_disconnected() local
1352 xd = tb_xdomain_find_by_route(tb, route); in icm_tr_xdomain_disconnected()
1353 if (xd) { in icm_tr_xdomain_disconnected()
1354 remove_xdomain(xd); in icm_tr_xdomain_disconnected()
1355 tb_xdomain_put(xd); in icm_tr_xdomain_disconnected()
1414 if (cap > 0) { in icm_ar_is_supported()
1427 return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9)); in icm_ar_cio_reset()
1461 memset(&reply, 0, sizeof(reply)); in icm_ar_driver_ready()
1475 return 0; in icm_ar_driver_ready()
1487 memset(&reply, 0, sizeof(reply)); in icm_ar_get_route()
1497 return 0; in icm_ar_get_route()
1508 memset(&reply, 0, sizeof(reply)); in icm_ar_get_boot_acl()
1517 for (i = 0; i < nuuids; i++) { in icm_ar_get_boot_acl()
1520 uuid[0] = reply.acl[i].uuid_lo; in icm_ar_get_boot_acl()
1523 if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) { in icm_ar_get_boot_acl()
1525 uuid[0] = 0; in icm_ar_get_boot_acl()
1526 uuid[1] = 0; in icm_ar_get_boot_acl()
1527 } else if (uuid[0] != 0 || uuid[1] != 0) { in icm_ar_get_boot_acl()
1529 uuid[2] = 0xffffffff; in icm_ar_get_boot_acl()
1530 uuid[3] = 0xffffffff; in icm_ar_get_boot_acl()
1549 for (i = 0; i < nuuids; i++) { in icm_ar_set_boot_acl()
1557 request.acl[i].uuid_lo = 0xffffffff; in icm_ar_set_boot_acl()
1558 request.acl[i].uuid_hi = 0xffffffff; in icm_ar_set_boot_acl()
1561 if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff) in icm_ar_set_boot_acl()
1564 request.acl[i].uuid_lo = uuid[0]; in icm_ar_set_boot_acl()
1569 memset(&reply, 0, sizeof(reply)); in icm_ar_set_boot_acl()
1578 return 0; in icm_ar_set_boot_acl()
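The icm_ar_get_boot_acl()/icm_ar_set_boot_acl() hits expose how preboot ACL entries are stored: each slot keeps only the two low 32-bit words of a UUID, an all-ones slot means "empty", and on write a non-null UUID is rejected unless its two high words are all-ones (the driver fills those back in on read). The standalone sketch below re-expresses that mapping outside the kernel; the tb_acl_entry type and the function names are hypothetical, only the sentinel values come from the listing.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the firmware-side 64-bit ACL slot and a
 * 128-bit UUID split into four 32-bit words, as in the matched code. */
struct tb_acl_entry { uint32_t uuid_lo, uuid_hi; };

/* Encode: null UUID -> all-ones (empty slot); otherwise the two high words
 * must already be all-ones and only the low 64 bits are stored. */
static int acl_encode(const uint32_t uuid[4], struct tb_acl_entry *e)
{
	const uint32_t zero[4] = { 0 };

	if (!memcmp(uuid, zero, sizeof(zero))) {
		e->uuid_lo = 0xffffffff;
		e->uuid_hi = 0xffffffff;
		return 0;
	}
	if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
		return -1;	/* the driver returns -EINVAL here */
	e->uuid_lo = uuid[0];
	e->uuid_hi = uuid[1];
	return 0;
}

/* Decode: all-ones -> null UUID; any other non-zero entry gets its high
 * words restored to all-ones, mirroring icm_ar_get_boot_acl(). */
static void acl_decode(const struct tb_acl_entry *e, uint32_t uuid[4])
{
	memset(uuid, 0, 4 * sizeof(*uuid));
	uuid[0] = e->uuid_lo;
	uuid[1] = e->uuid_hi;

	if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
		uuid[0] = 0;
		uuid[1] = 0;
	} else if (uuid[0] != 0 || uuid[1] != 0) {
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	}
}

int main(void)
{
	uint32_t in[4] = { 0x12345678, 0x9abcdef0, 0xffffffff, 0xffffffff };
	uint32_t out[4];
	struct tb_acl_entry e;

	if (acl_encode(in, &e) == 0) {
		acl_decode(&e, out);
		printf("round trip ok: %d\n", !memcmp(in, out, sizeof(in)));
	}
	return 0;
}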
1591 memset(&reply, 0, sizeof(reply)); in icm_icl_driver_ready()
1601 return 0; in icm_icl_driver_ready()
1609 pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]); in icm_icl_set_uuid()
1611 uuid[2] = 0xffffffff; in icm_icl_set_uuid()
1612 uuid[3] = 0xffffffff; in icm_icl_set_uuid()
1628 tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason); in icm_icl_rtd3_veto()
1725 res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH, in __icm_driver_ready()
1726 0, 1, 100); in __icm_driver_ready()
1728 return 0; in __icm_driver_ready()
1768 return 0; in icm_firmware_start()
1781 return 0; in icm_firmware_start()
1798 return 0; in icm_reset_phy_port()
1826 return 0; in icm_reset_phy_port()
1879 nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0); in icm_firmware_init()
1883 if (ret < 0) in icm_firmware_init()
1895 ret = icm_reset_phy_port(tb, 0); in icm_firmware_init()
1902 return 0; in icm_firmware_init()
1918 return 0; in icm_driver_ready()
1931 tb->nboot_acl = 0; in icm_driver_ready()
1933 return 0; in icm_driver_ready()
1943 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); in icm_suspend()
1944 return 0; in icm_suspend()
1974 return 0; in complete_rpm()
2060 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); in icm_runtime_suspend()
2061 return 0; in icm_runtime_suspend()
2068 return 0; in icm_runtime_suspend_switch()
2079 return 0; in icm_runtime_resume_switch()
2089 return 0; in icm_runtime_resume()
2098 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); in icm_start()
2100 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); in icm_start()
2126 nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); in icm_stop()
2131 return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); in icm_disconnect_pcie_paths()