Lines matching full:sw — identifier cross-reference; each entry shows the source line number, the matching code line, and the enclosing function (with "argument"/"local" marking how sw is declared there).
111 static void tb_add_dp_resources(struct tb_switch *sw) in tb_add_dp_resources() argument
113 struct tb_cm *tcm = tb_priv(sw->tb); in tb_add_dp_resources()
116 tb_switch_for_each_port(sw, port) { in tb_add_dp_resources()
120 if (!tb_switch_query_dp_resource(sw, port)) in tb_add_dp_resources()
129 if (tb_route(sw)) in tb_add_dp_resources()
138 static void tb_remove_dp_resources(struct tb_switch *sw) in tb_remove_dp_resources() argument
140 struct tb_cm *tcm = tb_priv(sw->tb); in tb_remove_dp_resources()
144 tb_switch_for_each_port(sw, port) { in tb_remove_dp_resources()
146 tb_remove_dp_resources(port->remote->sw); in tb_remove_dp_resources()
150 if (port->sw == sw) { in tb_remove_dp_resources()
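tb_add_dp_resources() and tb_remove_dp_resources() share one idiom: iterate every port of a router with tb_switch_for_each_port(), and on the remove path recurse into the router on the far end of each cabled port before handling the current one, so child resources go away before their parent's. Below is a minimal user-space sketch of that children-first walk; struct tb_switch and struct tb_port are mock reductions (only downstream links are modeled, which is what makes the recursion terminate), not the kernel's definitions.

#include <stdio.h>
#include <stddef.h>

struct tb_switch;

/* A lane port: belongs to one router, may be cabled to a remote port. */
struct tb_port {
	struct tb_switch *sw;
	struct tb_port *remote;
};

/* A router with a small port array; only downstream links are stored. */
struct tb_switch {
	const char *name;
	struct tb_port ports[2];
	size_t nports;
};

/* Children first, then the router itself -- the order used by
 * tb_remove_dp_resources(). */
static void remove_resources(struct tb_switch *sw)
{
	for (size_t i = 0; i < sw->nports; i++) {
		struct tb_port *port = &sw->ports[i];

		if (port->remote)
			remove_resources(port->remote->sw);
	}
	printf("removing DP resources of %s\n", sw->name);
}

int main(void)
{
	struct tb_switch leaf = { .name = "leaf", .nports = 0 };
	struct tb_port leaf_up = { .sw = &leaf };
	struct tb_switch host = {
		.name = "host",
		.ports = { { .sw = &host, .remote = &leaf_up } },
		.nports = 1,
	};

	remove_resources(&host);	/* prints leaf, then host */
	return 0;
}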
184 static int tb_enable_clx(struct tb_switch *sw) in tb_enable_clx() argument
186 struct tb_cm *tcm = tb_priv(sw->tb); in tb_enable_clx()
198 while (sw && tb_switch_depth(sw) > 1) in tb_enable_clx()
199 sw = tb_switch_parent(sw); in tb_enable_clx()
201 if (!sw) in tb_enable_clx()
204 if (tb_switch_depth(sw) != 1) in tb_enable_clx()
213 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) in tb_enable_clx()
222 ret = tb_switch_clx_enable(sw, clx | TB_CL2); in tb_enable_clx()
224 ret = tb_switch_clx_enable(sw, clx); in tb_enable_clx()
230 * @sw: Router to start
232 * Disables CL states from @sw up to the host router. Returns true if
237 static bool tb_disable_clx(struct tb_switch *sw) in tb_disable_clx() argument
244 ret = tb_switch_clx_disable(sw); in tb_disable_clx()
248 tb_sw_warn(sw, "failed to disable CL states\n"); in tb_disable_clx()
250 sw = tb_switch_parent(sw); in tb_disable_clx()
251 } while (sw); in tb_disable_clx()
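tb_disable_clx() climbs from the given router toward the host through tb_switch_parent() until no parent remains; the truncated comment ("Returns true if ...") together with the later `clx = tb_disable_clx(sw)` call sites suggests the return value records whether any CL state was actually turned off on the way up. A compilable sketch of that upward walk, with tb_switch_clx_disable() replaced by a local stub:

#include <stdbool.h>
#include <stdio.h>

/* Mock router: only the parent pointer and a CL-state flag matter. */
struct tb_switch {
	struct tb_switch *parent;	/* NULL for the host router */
	bool clx_enabled;
};

/* Stand-in for tb_switch_clx_disable(): returns the states that were
 * enabled (>0), 0 if none were, <0 on error. */
static int clx_disable(struct tb_switch *sw)
{
	int was = sw->clx_enabled ? 1 : 0;

	sw->clx_enabled = false;
	return was;
}

/* Walk from @sw to the host router, as tb_disable_clx() does, and
 * report whether anything was actually disabled on the way up. */
static bool disable_clx_to_host(struct tb_switch *sw)
{
	bool disabled = false;

	do {
		int ret = clx_disable(sw);

		if (ret > 0)
			disabled = true;
		/* the real code warns via tb_sw_warn() when ret < 0 */
		sw = sw->parent;
	} while (sw);

	return disabled;
}

int main(void)
{
	struct tb_switch host = { .parent = NULL, .clx_enabled = false };
	struct tb_switch dev  = { .parent = &host, .clx_enabled = true };

	printf("disabled anything: %d\n", disable_clx_to_host(&dev));
	return 0;
}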
258 struct tb_switch *sw; in tb_increase_switch_tmu_accuracy() local
260 sw = tb_to_switch(dev); in tb_increase_switch_tmu_accuracy()
261 if (!sw) in tb_increase_switch_tmu_accuracy()
264 if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) { in tb_increase_switch_tmu_accuracy()
268 if (tb_switch_clx_is_enabled(sw, TB_CL1)) in tb_increase_switch_tmu_accuracy()
273 ret = tb_switch_tmu_configure(sw, mode); in tb_increase_switch_tmu_accuracy()
277 return tb_switch_tmu_enable(sw); in tb_increase_switch_tmu_accuracy()
285 struct tb_switch *sw; in tb_increase_tmu_accuracy() local
299 sw = tunnel->tb->root_switch; in tb_increase_tmu_accuracy()
300 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); in tb_increase_tmu_accuracy()
305 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_tmu_hifi_uni_required() local
307 if (sw && tb_switch_tmu_is_enabled(sw) && in tb_switch_tmu_hifi_uni_required()
308 tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI)) in tb_switch_tmu_hifi_uni_required()
321 static int tb_enable_tmu(struct tb_switch *sw) in tb_enable_tmu() argument
332 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
335 if (tb_switch_clx_is_enabled(sw, TB_CL1)) { in tb_enable_tmu()
346 if (tb_tmu_hifi_uni_required(sw->tb)) in tb_enable_tmu()
347 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
350 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
353 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); in tb_enable_tmu()
358 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); in tb_enable_tmu()
364 if (tb_switch_tmu_is_enabled(sw)) in tb_enable_tmu()
367 ret = tb_switch_tmu_disable(sw); in tb_enable_tmu()
371 ret = tb_switch_tmu_post_time(sw); in tb_enable_tmu()
375 return tb_switch_tmu_enable(sw); in tb_enable_tmu()
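The visible skeleton of tb_enable_tmu() is an ordering constraint: configure the wanted TMU mode first, return early if the unit is already running that way, and otherwise disable it, post the time base, and re-enable. A self-contained approximation with stub helpers; the stub semantics (in particular is_enabled() comparing the current mode against the requested one) are assumptions, not the kernel's implementations.

#include <stdio.h>

enum tmu_mode { TMU_OFF, TMU_LOWRES, TMU_HIFI_UNI, TMU_HIFI_BI };

/* Mock TMU state: current vs. requested mode plus an enabled flag. */
struct tb_switch {
	enum tmu_mode mode, requested;
	int enabled;
};

static int tmu_configure(struct tb_switch *sw, enum tmu_mode m)
{
	sw->requested = m;
	return 0;
}

/* Assumed semantics: "enabled" means running in the requested mode. */
static int tmu_is_enabled(const struct tb_switch *sw)
{
	return sw->enabled && sw->mode == sw->requested;
}

static int tmu_disable(struct tb_switch *sw)   { sw->enabled = 0; return 0; }
static int tmu_post_time(struct tb_switch *sw) { (void)sw; return 0; }
static int tmu_enable(struct tb_switch *sw)
{
	sw->mode = sw->requested;
	sw->enabled = 1;
	return 0;
}

/* Same ordering as tb_enable_tmu(): pick the mode, skip if already
 * running in it, otherwise disable, post the time base, re-enable. */
static int enable_tmu(struct tb_switch *sw, enum tmu_mode wanted)
{
	int ret;

	ret = tmu_configure(sw, wanted);
	if (ret)
		return ret;

	if (tmu_is_enabled(sw))
		return 0;

	ret = tmu_disable(sw);
	if (ret)
		return ret;

	ret = tmu_post_time(sw);
	if (ret)
		return ret;

	return tmu_enable(sw);
}

int main(void)
{
	struct tb_switch sw = { .mode = TMU_OFF };

	printf("ret=%d mode=%d\n", enable_tmu(&sw, TMU_HIFI_BI), sw.mode);
	return 0;
}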
378 static void tb_switch_discover_tunnels(struct tb_switch *sw, in tb_switch_discover_tunnels() argument
382 struct tb *tb = sw->tb; in tb_switch_discover_tunnels()
385 tb_switch_for_each_port(sw, port) { in tb_switch_discover_tunnels()
410 tb_switch_for_each_port(sw, port) { in tb_switch_discover_tunnels()
412 tb_switch_discover_tunnels(port->remote->sw, list, in tb_switch_discover_tunnels()
420 if (tb_switch_is_usb4(port->sw)) in tb_port_configure_xdomain()
427 if (tb_switch_is_usb4(port->sw)) in tb_port_unconfigure_xdomain()
435 struct tb_switch *sw = port->sw; in tb_scan_xdomain() local
436 struct tb *tb = sw->tb; in tb_scan_xdomain()
450 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, in tb_scan_xdomain()
453 tb_port_at(route, sw)->xdomain = xd; in tb_scan_xdomain()
460 * tb_find_unused_port() - return the first inactive port on @sw
461 * @sw: Switch to find the port on
464 static struct tb_port *tb_find_unused_port(struct tb_switch *sw, in tb_find_unused_port() argument
469 tb_switch_for_each_port(sw, port) { in tb_find_unused_port()
483 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, in tb_find_usb3_down() argument
488 down = usb4_switch_map_usb3_down(sw, port); in tb_find_usb3_down()
517 struct tb_switch *sw; in tb_find_first_usb3_tunnel() local
521 sw = dst_port->sw; in tb_find_first_usb3_tunnel()
523 sw = src_port->sw; in tb_find_first_usb3_tunnel()
526 if (sw == tb->root_switch) in tb_find_first_usb3_tunnel()
530 port = tb_port_at(tb_route(sw), tb->root_switch); in tb_find_first_usb3_tunnel()
720 link_speed = port->sw->link_speed; in tb_maximum_bandwidth()
722 * sw->link_width is from upstream perspective so we use in tb_maximum_bandwidth()
725 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { in tb_maximum_bandwidth()
728 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { in tb_maximum_bandwidth()
746 up_bw = link_speed * port->sw->link_width * 1000; in tb_maximum_bandwidth()
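Reading the final line at face value, link_speed is in Gb/s per lane and up_bw comes out in Mb/s (that is what the * 1000 implies): a symmetric two-lane Gen 4 link gives 40 * 2 * 1000 = 80000 Mb/s, a Gen 3 link 20 * 2 * 1000 = 40000 Mb/s. The TB_LINK_WIDTH_ASYM_TX/RX branches exist because link_width stops being a single usable multiplier on an asymmetric link: USB4 v2 asymmetric links run three lanes in one direction and one in the other, so the two directions must be computed separately, and since sw->link_width is recorded from the upstream port's perspective (per the truncated comment), the TX/RX roles have to be mapped onto upstream/downstream bandwidth accordingly.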
901 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) in tb_tunnel_usb3() argument
903 struct tb_switch *parent = tb_switch_parent(sw); in tb_tunnel_usb3()
914 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); in tb_tunnel_usb3()
918 if (!sw->link_usb4) in tb_tunnel_usb3()
925 port = tb_switch_downstream_port(sw); in tb_tunnel_usb3()
984 static int tb_create_usb3_tunnels(struct tb_switch *sw) in tb_create_usb3_tunnels() argument
992 if (tb_route(sw)) { in tb_create_usb3_tunnels()
993 ret = tb_tunnel_usb3(sw->tb, sw); in tb_create_usb3_tunnels()
998 tb_switch_for_each_port(sw, port) { in tb_create_usb3_tunnels()
1001 ret = tb_create_usb3_tunnels(port->remote->sw); in tb_create_usb3_tunnels()
1029 struct tb_switch *sw; in tb_configure_asym() local
1039 sw = dst_port->sw; in tb_configure_asym()
1041 sw = src_port->sw; in tb_configure_asym()
1044 struct tb_port *down = tb_switch_downstream_port(up->sw); in tb_configure_asym()
1082 if (up->sw->link_width == width_up) in tb_configure_asym()
1095 clx = tb_disable_clx(sw); in tb_configure_asym()
1099 tb_sw_dbg(up->sw, "configuring asymmetric link\n"); in tb_configure_asym()
1105 ret = tb_switch_set_link_width(up->sw, width_up); in tb_configure_asym()
1107 tb_sw_warn(up->sw, "failed to set link width\n"); in tb_configure_asym()
1114 tb_enable_clx(sw); in tb_configure_asym()
1134 struct tb_switch *sw; in tb_configure_sym() local
1144 sw = dst_port->sw; in tb_configure_sym()
1146 sw = src_port->sw; in tb_configure_sym()
1152 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL) in tb_configure_sym()
1155 if (up->sw->is_unplugged) in tb_configure_sym()
1177 if (up->sw->link_width == TB_LINK_WIDTH_DUAL) in tb_configure_sym()
1188 up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) { in tb_configure_sym()
1189 tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n"); in tb_configure_sym()
1195 clx = tb_disable_clx(sw); in tb_configure_sym()
1199 tb_sw_dbg(up->sw, "configuring symmetric link\n"); in tb_configure_sym()
1201 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); in tb_configure_sym()
1203 tb_sw_warn(up->sw, "failed to set link width\n"); in tb_configure_sym()
1210 tb_enable_clx(sw); in tb_configure_sym()
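tb_configure_sym() and tb_configure_asym() share a bracket: CL states are dropped with tb_disable_clx() before the link width is changed, and re-enabled afterwards only when that call reported something had actually been on. A mock of the bracket (hardware access is reduced to a field write; helpers are local stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Mock router; the point here is the bracket, not the hardware. */
struct tb_switch { bool clx; int width; };

static bool disable_clx(struct tb_switch *sw)
{
	bool was = sw->clx;

	sw->clx = false;
	return was;		/* true if anything was enabled */
}

static void enable_clx(struct tb_switch *sw) { sw->clx = true; }

static int set_link_width(struct tb_switch *sw, int width)
{
	sw->width = width;	/* must run with CL states off */
	return 0;
}

/* Drop CL states, change the width, restore only what we took down. */
static int configure_width(struct tb_switch *sw, int width)
{
	bool clx = disable_clx(sw);
	int ret = set_link_width(sw, width);

	if (ret)
		fprintf(stderr, "failed to set link width\n");
	if (clx)
		enable_clx(sw);
	return ret;
}

int main(void)
{
	struct tb_switch sw = { .clx = true, .width = 1 };

	configure_width(&sw, 2);
	printf("width=%d clx=%d\n", sw.width, sw.clx);
	return 0;
}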
1216 struct tb_switch *sw) in tb_configure_link() argument
1218 struct tb *tb = sw->tb; in tb_configure_link()
1232 if (sw->link_width < TB_LINK_WIDTH_DUAL) in tb_configure_link()
1233 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL); in tb_configure_link()
1240 if (tb_switch_depth(sw) > 1 && in tb_configure_link()
1242 up->sw->link_width == TB_LINK_WIDTH_DUAL) { in tb_configure_link()
1245 host_port = tb_port_at(tb_route(sw), tb->root_switch); in tb_configure_link()
1250 tb_switch_configure_link(sw); in tb_configure_link()
1256 static void tb_scan_switch(struct tb_switch *sw) in tb_scan_switch() argument
1260 pm_runtime_get_sync(&sw->dev); in tb_scan_switch()
1262 tb_switch_for_each_port(sw, port) in tb_scan_switch()
1265 pm_runtime_mark_last_busy(&sw->dev); in tb_scan_switch()
1266 pm_runtime_put_autosuspend(&sw->dev); in tb_scan_switch()
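tb_scan_switch() wraps the whole port scan in a runtime-PM reference: pm_runtime_get_sync() keeps the router powered while its ports are scanned, and the mark_last_busy()/put_autosuspend() pair afterwards lets it idle again once the autosuspend timeout expires. A user-space mock of that balanced bracket; the pm_* stubs here stand in for the kernel API:

#include <stdio.h>

/* Mock device with a usage count. */
struct device { int usage; };

static void pm_get_sync(struct device *d)        { d->usage++; }
static void pm_mark_last_busy(struct device *d)  { (void)d; }
static void pm_put_autosuspend(struct device *d) { d->usage--; }

static void scan_switch(struct device *dev)
{
	pm_get_sync(dev);          /* device stays powered while scanning */

	/* ... tb_scan_port() for every port would run here ... */

	pm_mark_last_busy(dev);    /* restart the autosuspend timer */
	pm_put_autosuspend(dev);   /* reference dropped; may idle later */
}

int main(void)
{
	struct device dev = { 0 };

	scan_switch(&dev);
	printf("usage=%d\n", dev.usage);   /* balanced: 0 */
	return 0;
}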
1274 struct tb_cm *tcm = tb_priv(port->sw->tb); in tb_scan_port()
1277 struct tb_switch *sw; in tb_scan_port() local
1285 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, in tb_scan_port()
1308 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, in tb_scan_port()
1310 if (IS_ERR(sw)) { in tb_scan_port()
1322 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) in tb_scan_port()
1327 if (tb_switch_configure(sw)) { in tb_scan_port()
1328 tb_switch_put(sw); in tb_scan_port()
1348 dev_set_uevent_suppress(&sw->dev, true); in tb_scan_port()
1356 sw->rpm = sw->generation > 1; in tb_scan_port()
1358 if (tb_switch_add(sw)) { in tb_scan_port()
1359 tb_switch_put(sw); in tb_scan_port()
1363 upstream_port = tb_upstream_port(sw); in tb_scan_port()
1364 tb_configure_link(port, upstream_port, sw); in tb_scan_port()
1379 tb_sw_dbg(sw, "discovery, not touching CL states\n"); in tb_scan_port()
1380 else if (tb_enable_clx(sw)) in tb_scan_port()
1381 tb_sw_warn(sw, "failed to enable CL states\n"); in tb_scan_port()
1383 if (tb_enable_tmu(sw)) in tb_scan_port()
1384 tb_sw_warn(sw, "failed to enable TMU\n"); in tb_scan_port()
1390 tb_switch_configuration_valid(sw); in tb_scan_port()
1401 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) in tb_scan_port()
1402 tb_sw_warn(sw, "USB3 tunnel creation failed\n"); in tb_scan_port()
1404 tb_add_dp_resources(sw); in tb_scan_port()
1405 tb_scan_switch(sw); in tb_scan_port()
1621 if (tunnel->src_port->sw == in->sw && in tb_attach_bandwidth_group()
1622 tunnel->dst_port->sw == out->sw) { in tb_attach_bandwidth_group()
1686 struct tb_switch *parent = tunnel->dst_port->sw; in tb_discover_tunnels()
1688 while (parent != tunnel->src_port->sw) { in tb_discover_tunnels()
1697 pm_runtime_get_sync(&in->sw->dev); in tb_discover_tunnels()
1698 pm_runtime_get_sync(&out->sw->dev); in tb_discover_tunnels()
1727 tb_switch_dealloc_dp_resource(src_port->sw, src_port); in tb_deactivate_and_free_tunnel()
1734 pm_runtime_mark_last_busy(&dst_port->sw->dev); in tb_deactivate_and_free_tunnel()
1735 pm_runtime_put_autosuspend(&dst_port->sw->dev); in tb_deactivate_and_free_tunnel()
1736 pm_runtime_mark_last_busy(&src_port->sw->dev); in tb_deactivate_and_free_tunnel()
1737 pm_runtime_put_autosuspend(&src_port->sw->dev); in tb_deactivate_and_free_tunnel()
1773 static void tb_free_unplugged_children(struct tb_switch *sw) in tb_free_unplugged_children() argument
1777 tb_switch_for_each_port(sw, port) { in tb_free_unplugged_children()
1781 if (port->remote->sw->is_unplugged) { in tb_free_unplugged_children()
1783 tb_remove_dp_resources(port->remote->sw); in tb_free_unplugged_children()
1784 tb_switch_unconfigure_link(port->remote->sw); in tb_free_unplugged_children()
1785 tb_switch_set_link_width(port->remote->sw, in tb_free_unplugged_children()
1787 tb_switch_remove(port->remote->sw); in tb_free_unplugged_children()
1792 tb_free_unplugged_children(port->remote->sw); in tb_free_unplugged_children()
1797 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, in tb_find_pcie_down() argument
1806 if (tb_switch_is_usb4(sw)) { in tb_find_pcie_down()
1807 down = usb4_switch_map_pcie_down(sw, port); in tb_find_pcie_down()
1808 } else if (!tb_route(sw)) { in tb_find_pcie_down()
1816 if (tb_switch_is_cactus_ridge(sw) || in tb_find_pcie_down()
1817 tb_switch_is_alpine_ridge(sw)) in tb_find_pcie_down()
1819 else if (tb_switch_is_falcon_ridge(sw)) in tb_find_pcie_down()
1821 else if (tb_switch_is_titan_ridge(sw)) in tb_find_pcie_down()
1827 if (WARN_ON(index > sw->config.max_port_number)) in tb_find_pcie_down()
1830 down = &sw->ports[index]; in tb_find_pcie_down()
1843 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); in tb_find_pcie_down()
1851 host_port = tb_route(in->sw) ? in tb_find_dp_out()
1852 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; in tb_find_dp_out()
1864 if (in->sw == port->sw) { in tb_find_dp_out()
1875 if (host_port && tb_route(port->sw)) { in tb_find_dp_out()
1878 p = tb_port_at(tb_route(port->sw), tb->root_switch); in tb_find_dp_out()
1983 pm_runtime_get_sync(&in->sw->dev); in tb_tunnel_one_dp()
1984 pm_runtime_get_sync(&out->sw->dev); in tb_tunnel_one_dp()
1986 if (tb_switch_alloc_dp_resource(in->sw, in)) { in tb_tunnel_one_dp()
2036 tb_switch_dealloc_dp_resource(in->sw, in); in tb_tunnel_one_dp()
2038 pm_runtime_mark_last_busy(&out->sw->dev); in tb_tunnel_one_dp()
2039 pm_runtime_put_autosuspend(&out->sw->dev); in tb_tunnel_one_dp()
2040 pm_runtime_mark_last_busy(&in->sw->dev); in tb_tunnel_one_dp()
2041 pm_runtime_put_autosuspend(&in->sw->dev); in tb_tunnel_one_dp()
2087 struct tb_switch *sw = port->sw; in tb_enter_redrive() local
2089 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_enter_redrive()
2101 if (tb_route(sw)) in tb_enter_redrive()
2103 if (!tb_switch_query_dp_resource(sw, port)) { in tb_enter_redrive()
2105 pm_runtime_get(&sw->dev); in tb_enter_redrive()
2112 struct tb_switch *sw = port->sw; in tb_exit_redrive() local
2114 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_exit_redrive()
2119 if (tb_route(sw)) in tb_exit_redrive()
2121 if (port->redrive && tb_switch_query_dp_resource(sw, port)) { in tb_exit_redrive()
2123 pm_runtime_put(&sw->dev); in tb_exit_redrive()
2128 static void tb_switch_enter_redrive(struct tb_switch *sw) in tb_switch_enter_redrive() argument
2132 tb_switch_for_each_port(sw, port) in tb_switch_enter_redrive()
2140 static void tb_switch_exit_redrive(struct tb_switch *sw) in tb_switch_exit_redrive() argument
2144 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_switch_exit_redrive()
2147 tb_switch_for_each_port(sw, port) { in tb_switch_exit_redrive()
2153 pm_runtime_put(&sw->dev); in tb_switch_exit_redrive()
2235 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) in tb_disconnect_pci() argument
2240 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); in tb_disconnect_pci()
2248 tb_switch_xhci_disconnect(sw); in tb_disconnect_pci()
2256 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) in tb_tunnel_pci() argument
2262 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); in tb_tunnel_pci()
2270 port = tb_switch_downstream_port(sw); in tb_tunnel_pci()
2271 down = tb_find_pcie_down(tb_switch_parent(sw), port); in tb_tunnel_pci()
2290 if (tb_switch_pcie_l1_enable(sw)) in tb_tunnel_pci()
2291 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); in tb_tunnel_pci()
2293 if (tb_switch_xhci_connect(sw)) in tb_tunnel_pci()
2294 tb_sw_warn(sw, "failed to connect xHCI\n"); in tb_tunnel_pci()
2307 struct tb_switch *sw; in tb_approve_xdomain_paths() local
2310 sw = tb_to_switch(xd->dev.parent); in tb_approve_xdomain_paths()
2311 dst_port = tb_port_at(xd->route, sw); in tb_approve_xdomain_paths()
2320 tb_disable_clx(sw); in tb_approve_xdomain_paths()
2343 tb_enable_clx(sw); in tb_approve_xdomain_paths()
2356 struct tb_switch *sw; in __tb_disconnect_xdomain_paths() local
2358 sw = tb_to_switch(xd->dev.parent); in __tb_disconnect_xdomain_paths()
2359 dst_port = tb_port_at(xd->route, sw); in __tb_disconnect_xdomain_paths()
2378 tb_enable_clx(sw); in __tb_disconnect_xdomain_paths()
2407 struct tb_switch *sw; in tb_handle_hotplug() local
2417 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_hotplug()
2418 if (!sw) { in tb_handle_hotplug()
2424 if (ev->port > sw->config.max_port_number) { in tb_handle_hotplug()
2430 port = &sw->ports[ev->port]; in tb_handle_hotplug()
2437 pm_runtime_get_sync(&sw->dev); in tb_handle_hotplug()
2444 tb_sw_set_unplugged(port->remote->sw); in tb_handle_hotplug()
2446 tb_remove_dp_resources(port->remote->sw); in tb_handle_hotplug()
2447 tb_switch_tmu_disable(port->remote->sw); in tb_handle_hotplug()
2448 tb_switch_unconfigure_link(port->remote->sw); in tb_handle_hotplug()
2449 tb_switch_set_link_width(port->remote->sw, in tb_handle_hotplug()
2451 tb_switch_remove(port->remote->sw); in tb_handle_hotplug()
2478 tb_sw_dbg(sw, "xHCI disconnect request\n"); in tb_handle_hotplug()
2479 tb_switch_xhci_disconnect(sw); in tb_handle_hotplug()
2486 } else if (!port->port && sw->authorized) { in tb_handle_hotplug()
2487 tb_sw_dbg(sw, "xHCI connect request\n"); in tb_handle_hotplug()
2488 tb_switch_xhci_connect(sw); in tb_handle_hotplug()
2500 pm_runtime_mark_last_busy(&sw->dev); in tb_handle_hotplug()
2501 pm_runtime_put_autosuspend(&sw->dev); in tb_handle_hotplug()
2504 tb_switch_put(sw); in tb_handle_hotplug()
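tb_handle_hotplug() follows a lookup-then-put shape: tb_switch_find_by_route() returns the router with a reference held, the adapter number from the event is validated against sw->config.max_port_number, and every exit path funnels through tb_switch_put(). A mock of that shape; the types and helpers below are illustrative stand-ins, not the kernel's:

#include <stdio.h>
#include <stddef.h>

struct tb_switch {
	unsigned long long route;
	int max_port;
	int refs;
};

/* Stand-in for tb_switch_find_by_route(): success takes a reference
 * that the caller must drop. */
static struct tb_switch *find_by_route(struct tb_switch *all, size_t n,
				       unsigned long long route)
{
	for (size_t i = 0; i < n; i++) {
		if (all[i].route == route) {
			all[i].refs++;
			return &all[i];
		}
	}
	return NULL;
}

static void switch_put(struct tb_switch *sw) { sw->refs--; }

static void handle_hotplug(struct tb_switch *all, size_t n,
			   unsigned long long route, int port)
{
	struct tb_switch *sw = find_by_route(all, n, route);

	if (!sw)
		return;			/* router already gone */
	if (port > sw->max_port)
		goto put;		/* bogus adapter number */

	printf("plug event on route %llu port %d\n", sw->route, port);
put:
	switch_put(sw);
}

int main(void)
{
	struct tb_switch routers[] = { { .route = 1, .max_port = 4 } };

	handle_hotplug(routers, 1, 1, 2);
	handle_hotplug(routers, 1, 1, 9);	/* rejected, ref still put */
	return 0;
}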
2714 struct tb_switch *sw; in tb_handle_dp_bandwidth_request() local
2723 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_dp_bandwidth_request()
2724 if (!sw) { in tb_handle_dp_bandwidth_request()
2730 in = &sw->ports[ev->port]; in tb_handle_dp_bandwidth_request()
2826 tb_switch_put(sw); in tb_handle_dp_bandwidth_request()
2944 struct tb_switch *sw = tb_to_switch(dev); in tb_scan_finalize_switch() local
2951 if (sw->boot) in tb_scan_finalize_switch()
2952 sw->authorized = 1; in tb_scan_finalize_switch()
3057 static void tb_restore_children(struct tb_switch *sw) in tb_restore_children() argument
3062 if (sw->is_unplugged) in tb_restore_children()
3065 if (tb_enable_clx(sw)) in tb_restore_children()
3066 tb_sw_warn(sw, "failed to re-enable CL states\n"); in tb_restore_children()
3068 if (tb_enable_tmu(sw)) in tb_restore_children()
3069 tb_sw_warn(sw, "failed to restore TMU configuration\n"); in tb_restore_children()
3071 tb_switch_configuration_valid(sw); in tb_restore_children()
3073 tb_switch_for_each_port(sw, port) { in tb_restore_children()
3078 tb_switch_set_link_width(port->remote->sw, in tb_restore_children()
3079 port->remote->sw->link_width); in tb_restore_children()
3080 tb_switch_configure_link(port->remote->sw); in tb_restore_children()
3082 tb_restore_children(port->remote->sw); in tb_restore_children()
3150 static int tb_free_unplugged_xdomains(struct tb_switch *sw) in tb_free_unplugged_xdomains() argument
3155 tb_switch_for_each_port(sw, port) { in tb_free_unplugged_xdomains()
3165 ret += tb_free_unplugged_xdomains(port->remote->sw); in tb_free_unplugged_xdomains()