Lines matching references to `sw` in the Thunderbolt connection manager (drivers/thunderbolt/tb.c). Each entry shows the source line number, the matching line, and the enclosing function; `argument` and `local` mark the lines where `sw` is declared.

99 static void tb_add_dp_resources(struct tb_switch *sw)  in tb_add_dp_resources()  argument
101 struct tb_cm *tcm = tb_priv(sw->tb); in tb_add_dp_resources()
104 tb_switch_for_each_port(sw, port) { in tb_add_dp_resources()
108 if (!tb_switch_query_dp_resource(sw, port)) in tb_add_dp_resources()
117 if (tb_route(sw)) in tb_add_dp_resources()
126 static void tb_remove_dp_resources(struct tb_switch *sw) in tb_remove_dp_resources() argument
128 struct tb_cm *tcm = tb_priv(sw->tb); in tb_remove_dp_resources()
132 tb_switch_for_each_port(sw, port) { in tb_remove_dp_resources()
134 tb_remove_dp_resources(port->remote->sw); in tb_remove_dp_resources()
138 if (port->sw == sw) { in tb_remove_dp_resources()
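The hits above show tb_remove_dp_resources() recursing into every downstream switch through port->remote->sw before dealing with its own ports. A minimal sketch of that child-first traversal pattern, using simplified hypothetical types rather than the kernel's struct tb_switch/struct tb_port:

#include <stddef.h>

/* Simplified stand-ins for struct tb_switch / struct tb_port (hypothetical). */
struct sw_node;

struct sw_port {
	struct sw_node *remote_sw;	/* downstream switch, NULL if nothing is attached */
};

struct sw_node {
	struct sw_port ports[8];
	size_t nports;
};

/* Depth-first walk: recurse into the children first, then visit the switch
 * itself, mirroring the order suggested by the hits above. */
static void walk_switch_tree(struct sw_node *sw, void (*visit)(struct sw_node *))
{
	size_t i;

	for (i = 0; i < sw->nports; i++) {
		if (sw->ports[i].remote_sw)
			walk_switch_tree(sw->ports[i].remote_sw, visit);
	}
	visit(sw);
}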
172 static int tb_enable_clx(struct tb_switch *sw) in tb_enable_clx() argument
174 struct tb_cm *tcm = tb_priv(sw->tb); in tb_enable_clx()
186 while (sw && tb_switch_depth(sw) > 1) in tb_enable_clx()
187 sw = tb_switch_parent(sw); in tb_enable_clx()
189 if (!sw) in tb_enable_clx()
192 if (tb_switch_depth(sw) != 1) in tb_enable_clx()
201 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) in tb_enable_clx()
210 ret = tb_switch_clx_enable(sw, clx | TB_CL2); in tb_enable_clx()
212 ret = tb_switch_clx_enable(sw, clx); in tb_enable_clx()
225 static bool tb_disable_clx(struct tb_switch *sw) in tb_disable_clx() argument
232 ret = tb_switch_clx_disable(sw); in tb_disable_clx()
236 tb_sw_warn(sw, "failed to disable CL states\n"); in tb_disable_clx()
238 sw = tb_switch_parent(sw); in tb_disable_clx()
239 } while (sw); in tb_disable_clx()
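Both tb_enable_clx() and tb_disable_clx() walk upward with tb_switch_parent(): the former stops at the depth-1 switch directly below the host router (and bails out if it does not land on depth 1), the latter keeps going until the parent pointer runs out. A sketch of the two loop shapes, with a hypothetical parent/depth pair standing in for the kernel helpers:

struct node {
	struct node *parent;	/* NULL above the root (host router) */
	int depth;		/* 0 for the root, 1 for directly attached, ... */
};

/* Find the depth-1 ancestor, as tb_enable_clx() does; may return the root
 * itself if the walk started at the host router, which the caller rejects. */
static struct node *depth1_ancestor(struct node *n)
{
	while (n && n->depth > 1)
		n = n->parent;
	return n;
}

/* Visit every ancestor including the starting node, as tb_disable_clx()
 * does for the whole chain up to the host router; assumes n is non-NULL. */
static void for_each_ancestor(struct node *n, void (*fn)(struct node *))
{
	do {
		fn(n);
		n = n->parent;
	} while (n);
}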
246 struct tb_switch *sw; in tb_increase_switch_tmu_accuracy() local
248 sw = tb_to_switch(dev); in tb_increase_switch_tmu_accuracy()
249 if (!sw) in tb_increase_switch_tmu_accuracy()
252 if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) { in tb_increase_switch_tmu_accuracy()
256 if (tb_switch_clx_is_enabled(sw, TB_CL1)) in tb_increase_switch_tmu_accuracy()
261 ret = tb_switch_tmu_configure(sw, mode); in tb_increase_switch_tmu_accuracy()
265 return tb_switch_tmu_enable(sw); in tb_increase_switch_tmu_accuracy()
273 struct tb_switch *sw; in tb_increase_tmu_accuracy() local
287 sw = tunnel->tb->root_switch; in tb_increase_tmu_accuracy()
288 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); in tb_increase_tmu_accuracy()
293 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_tmu_hifi_uni_required() local
295 if (sw && tb_switch_tmu_is_enabled(sw) && in tb_switch_tmu_hifi_uni_required()
296 tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI)) in tb_switch_tmu_hifi_uni_required()
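tb_increase_switch_tmu_accuracy() and tb_switch_tmu_hifi_uni_required() are per-child callbacks that use tb_to_switch() to skip children that are not switches. A hedged sketch of that callback shape; device_for_each_child() and tb_to_switch() are the real kernel helpers, while visit_switch() and its body are illustrative:

#include <linux/device.h>
#include "tb.h"	/* struct tb_switch, tb_to_switch() */

/* Runs for every child device of a switch, so filter out anything that is
 * not itself a tb_switch before touching it. */
static int visit_switch(struct device *dev, void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);

	if (!sw)
		return 0;	/* not a switch, keep iterating */

	/* ... operate on sw, e.g. reconfigure its TMU ... */
	return 0;		/* non-zero would stop the iteration early */
}

/* Usage, as in tb_increase_tmu_accuracy():
 *	device_for_each_child(&sw->dev, NULL, visit_switch);
 */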
309 static int tb_enable_tmu(struct tb_switch *sw) in tb_enable_tmu() argument
320 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
323 if (tb_switch_clx_is_enabled(sw, TB_CL1)) { in tb_enable_tmu()
334 if (tb_tmu_hifi_uni_required(sw->tb)) in tb_enable_tmu()
335 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
338 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
341 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); in tb_enable_tmu()
346 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); in tb_enable_tmu()
352 if (tb_switch_tmu_is_enabled(sw)) in tb_enable_tmu()
355 ret = tb_switch_tmu_disable(sw); in tb_enable_tmu()
359 ret = tb_switch_tmu_post_time(sw); in tb_enable_tmu()
363 return tb_switch_tmu_enable(sw); in tb_enable_tmu()
366 static void tb_switch_discover_tunnels(struct tb_switch *sw, in tb_switch_discover_tunnels() argument
370 struct tb *tb = sw->tb; in tb_switch_discover_tunnels()
373 tb_switch_for_each_port(sw, port) { in tb_switch_discover_tunnels()
398 tb_switch_for_each_port(sw, port) { in tb_switch_discover_tunnels()
400 tb_switch_discover_tunnels(port->remote->sw, list, in tb_switch_discover_tunnels()
408 if (tb_switch_is_usb4(port->sw)) in tb_port_configure_xdomain()
415 if (tb_switch_is_usb4(port->sw)) in tb_port_unconfigure_xdomain()
423 struct tb_switch *sw = port->sw; in tb_scan_xdomain() local
424 struct tb *tb = sw->tb; in tb_scan_xdomain()
438 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, in tb_scan_xdomain()
441 tb_port_at(route, sw)->xdomain = xd; in tb_scan_xdomain()
452 static struct tb_port *tb_find_unused_port(struct tb_switch *sw, in tb_find_unused_port() argument
457 tb_switch_for_each_port(sw, port) { in tb_find_unused_port()
471 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, in tb_find_usb3_down() argument
476 down = usb4_switch_map_usb3_down(sw, port); in tb_find_usb3_down()
505 struct tb_switch *sw; in tb_find_first_usb3_tunnel() local
509 sw = dst_port->sw; in tb_find_first_usb3_tunnel()
511 sw = src_port->sw; in tb_find_first_usb3_tunnel()
514 if (sw == tb->root_switch) in tb_find_first_usb3_tunnel()
518 port = tb_port_at(tb_route(sw), tb->root_switch); in tb_find_first_usb3_tunnel()
708 link_speed = port->sw->link_speed; in tb_maximum_bandwidth()
713 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { in tb_maximum_bandwidth()
716 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { in tb_maximum_bandwidth()
734 up_bw = link_speed * port->sw->link_width * 1000; in tb_maximum_bandwidth()
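The calculation at line 734 converts the negotiated link parameters into Mb/s: link_speed is in Gb/s per lane and the symmetric width counts lanes, so a 20 Gb/s dual-lane link (TB_LINK_WIDTH_DUAL, i.e. width 2) gives 20 * 2 * 1000 = 40000 Mb/s; the asymmetric widths are handled by the separate branches at lines 713 and 716. A worked example of the symmetric case:

/* Example: 20 Gb/s per lane, symmetric dual-lane link. */
unsigned int link_speed = 20;				/* Gb/s */
unsigned int link_width = 2;				/* TB_LINK_WIDTH_DUAL */
unsigned int up_bw = link_speed * link_width * 1000;	/* 40000 Mb/s */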
889 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) in tb_tunnel_usb3() argument
891 struct tb_switch *parent = tb_switch_parent(sw); in tb_tunnel_usb3()
902 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); in tb_tunnel_usb3()
906 if (!sw->link_usb4) in tb_tunnel_usb3()
913 port = tb_switch_downstream_port(sw); in tb_tunnel_usb3()
972 static int tb_create_usb3_tunnels(struct tb_switch *sw) in tb_create_usb3_tunnels() argument
980 if (tb_route(sw)) { in tb_create_usb3_tunnels()
981 ret = tb_tunnel_usb3(sw->tb, sw); in tb_create_usb3_tunnels()
986 tb_switch_for_each_port(sw, port) { in tb_create_usb3_tunnels()
989 ret = tb_create_usb3_tunnels(port->remote->sw); in tb_create_usb3_tunnels()
1017 struct tb_switch *sw; in tb_configure_asym() local
1027 sw = dst_port->sw; in tb_configure_asym()
1029 sw = src_port->sw; in tb_configure_asym()
1032 struct tb_port *down = tb_switch_downstream_port(up->sw); in tb_configure_asym()
1070 if (up->sw->link_width == width_up) in tb_configure_asym()
1083 clx = tb_disable_clx(sw); in tb_configure_asym()
1087 tb_sw_dbg(up->sw, "configuring asymmetric link\n"); in tb_configure_asym()
1093 ret = tb_switch_set_link_width(up->sw, width_up); in tb_configure_asym()
1095 tb_sw_warn(up->sw, "failed to set link width\n"); in tb_configure_asym()
1102 tb_enable_clx(sw); in tb_configure_asym()
1122 struct tb_switch *sw; in tb_configure_sym() local
1132 sw = dst_port->sw; in tb_configure_sym()
1134 sw = src_port->sw; in tb_configure_sym()
1140 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL) in tb_configure_sym()
1143 if (up->sw->is_unplugged) in tb_configure_sym()
1165 if (up->sw->link_width == TB_LINK_WIDTH_DUAL) in tb_configure_sym()
1176 up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) { in tb_configure_sym()
1177 tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n"); in tb_configure_sym()
1183 clx = tb_disable_clx(sw); in tb_configure_sym()
1187 tb_sw_dbg(up->sw, "configuring symmetric link\n"); in tb_configure_sym()
1189 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); in tb_configure_sym()
1191 tb_sw_warn(up->sw, "failed to set link width\n"); in tb_configure_sym()
1198 tb_enable_clx(sw); in tb_configure_sym()
1204 struct tb_switch *sw) in tb_configure_link() argument
1206 struct tb *tb = sw->tb; in tb_configure_link()
1220 if (sw->link_width < TB_LINK_WIDTH_DUAL) in tb_configure_link()
1221 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL); in tb_configure_link()
1228 if (tb_switch_depth(sw) > 1 && in tb_configure_link()
1230 up->sw->link_width == TB_LINK_WIDTH_DUAL) { in tb_configure_link()
1233 host_port = tb_port_at(tb_route(sw), tb->root_switch); in tb_configure_link()
1238 tb_switch_configure_link(sw); in tb_configure_link()
1246 static void tb_scan_switch(struct tb_switch *sw) in tb_scan_switch() argument
1250 pm_runtime_get_sync(&sw->dev); in tb_scan_switch()
1252 tb_switch_for_each_port(sw, port) in tb_scan_switch()
1255 pm_runtime_mark_last_busy(&sw->dev); in tb_scan_switch()
1256 pm_runtime_put_autosuspend(&sw->dev); in tb_scan_switch()
1264 struct tb_cm *tcm = tb_priv(port->sw->tb); in tb_scan_port()
1267 struct tb_switch *sw; in tb_scan_port() local
1275 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, in tb_scan_port()
1300 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, in tb_scan_port()
1302 if (IS_ERR(sw)) { in tb_scan_port()
1308 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) in tb_scan_port()
1313 if (tb_switch_configure(sw)) { in tb_scan_port()
1314 tb_switch_put(sw); in tb_scan_port()
1334 dev_set_uevent_suppress(&sw->dev, true); in tb_scan_port()
1342 sw->rpm = sw->generation > 1; in tb_scan_port()
1344 if (tb_switch_add(sw)) { in tb_scan_port()
1345 tb_switch_put(sw); in tb_scan_port()
1349 upstream_port = tb_upstream_port(sw); in tb_scan_port()
1350 tb_configure_link(port, upstream_port, sw); in tb_scan_port()
1357 tb_sw_dbg(sw, "discovery, not touching CL states\n"); in tb_scan_port()
1358 else if (tb_enable_clx(sw)) in tb_scan_port()
1359 tb_sw_warn(sw, "failed to enable CL states\n"); in tb_scan_port()
1361 if (tb_enable_tmu(sw)) in tb_scan_port()
1362 tb_sw_warn(sw, "failed to enable TMU\n"); in tb_scan_port()
1368 tb_switch_configuration_valid(sw); in tb_scan_port()
1379 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) in tb_scan_port()
1380 tb_sw_warn(sw, "USB3 tunnel creation failed\n"); in tb_scan_port()
1382 tb_add_dp_resources(sw); in tb_scan_port()
1383 tb_scan_switch(sw); in tb_scan_port()
1599 if (tunnel->src_port->sw == in->sw && in tb_attach_bandwidth_group()
1600 tunnel->dst_port->sw == out->sw) { in tb_attach_bandwidth_group()
1664 struct tb_switch *parent = tunnel->dst_port->sw; in tb_discover_tunnels()
1666 while (parent != tunnel->src_port->sw) { in tb_discover_tunnels()
1675 pm_runtime_get_sync(&in->sw->dev); in tb_discover_tunnels()
1676 pm_runtime_get_sync(&out->sw->dev); in tb_discover_tunnels()
1705 tb_switch_dealloc_dp_resource(src_port->sw, src_port); in tb_deactivate_and_free_tunnel()
1712 pm_runtime_mark_last_busy(&dst_port->sw->dev); in tb_deactivate_and_free_tunnel()
1713 pm_runtime_put_autosuspend(&dst_port->sw->dev); in tb_deactivate_and_free_tunnel()
1714 pm_runtime_mark_last_busy(&src_port->sw->dev); in tb_deactivate_and_free_tunnel()
1715 pm_runtime_put_autosuspend(&src_port->sw->dev); in tb_deactivate_and_free_tunnel()
1751 static void tb_free_unplugged_children(struct tb_switch *sw) in tb_free_unplugged_children() argument
1755 tb_switch_for_each_port(sw, port) { in tb_free_unplugged_children()
1759 if (port->remote->sw->is_unplugged) { in tb_free_unplugged_children()
1761 tb_remove_dp_resources(port->remote->sw); in tb_free_unplugged_children()
1762 tb_switch_unconfigure_link(port->remote->sw); in tb_free_unplugged_children()
1763 tb_switch_set_link_width(port->remote->sw, in tb_free_unplugged_children()
1765 tb_switch_remove(port->remote->sw); in tb_free_unplugged_children()
1770 tb_free_unplugged_children(port->remote->sw); in tb_free_unplugged_children()
1775 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, in tb_find_pcie_down() argument
1784 if (tb_switch_is_usb4(sw)) { in tb_find_pcie_down()
1785 down = usb4_switch_map_pcie_down(sw, port); in tb_find_pcie_down()
1786 } else if (!tb_route(sw)) { in tb_find_pcie_down()
1794 if (tb_switch_is_cactus_ridge(sw) || in tb_find_pcie_down()
1795 tb_switch_is_alpine_ridge(sw)) in tb_find_pcie_down()
1797 else if (tb_switch_is_falcon_ridge(sw)) in tb_find_pcie_down()
1799 else if (tb_switch_is_titan_ridge(sw)) in tb_find_pcie_down()
1805 if (WARN_ON(index > sw->config.max_port_number)) in tb_find_pcie_down()
1808 down = &sw->ports[index]; in tb_find_pcie_down()
1821 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); in tb_find_pcie_down()
1829 host_port = tb_route(in->sw) ? in tb_find_dp_out()
1830 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; in tb_find_dp_out()
1842 if (in->sw == port->sw) { in tb_find_dp_out()
1853 if (host_port && tb_route(port->sw)) { in tb_find_dp_out()
1856 p = tb_port_at(tb_route(port->sw), tb->root_switch); in tb_find_dp_out()
1897 pm_runtime_get_sync(&in->sw->dev); in tb_tunnel_one_dp()
1898 pm_runtime_get_sync(&out->sw->dev); in tb_tunnel_one_dp()
1900 if (tb_switch_alloc_dp_resource(in->sw, in)) { in tb_tunnel_one_dp()
1968 tb_switch_dealloc_dp_resource(in->sw, in); in tb_tunnel_one_dp()
1970 pm_runtime_mark_last_busy(&out->sw->dev); in tb_tunnel_one_dp()
1971 pm_runtime_put_autosuspend(&out->sw->dev); in tb_tunnel_one_dp()
1972 pm_runtime_mark_last_busy(&in->sw->dev); in tb_tunnel_one_dp()
1973 pm_runtime_put_autosuspend(&in->sw->dev); in tb_tunnel_one_dp()
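tb_tunnel_one_dp() takes runtime-PM references on both ends of the tunnel before working on them and releases them in reverse order on the error path; the hits at lines 1712-1715 show tb_deactivate_and_free_tunnel() doing the same release on teardown. A sketch of the pairing, where the setup callback is a hypothetical stand-in for the actual DP tunnel work; the pm_runtime_* calls are the real kernel API:

#include <linux/pm_runtime.h>

/* Keep both routers resumed while the tunnel is set up; on failure drop the
 * references through autosuspend so the devices can idle again. On success
 * the references stay held until the tunnel is torn down. */
static int tunnel_with_rpm(struct device *in_dev, struct device *out_dev,
			   int (*setup)(struct device *, struct device *))
{
	int ret;

	pm_runtime_get_sync(in_dev);
	pm_runtime_get_sync(out_dev);

	ret = setup(in_dev, out_dev);
	if (ret) {
		pm_runtime_mark_last_busy(out_dev);
		pm_runtime_put_autosuspend(out_dev);
		pm_runtime_mark_last_busy(in_dev);
		pm_runtime_put_autosuspend(in_dev);
	}
	return ret;
}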
2021 struct tb_switch *sw = port->sw; in tb_enter_redrive() local
2023 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_enter_redrive()
2035 if (tb_route(sw)) in tb_enter_redrive()
2037 if (!tb_switch_query_dp_resource(sw, port)) { in tb_enter_redrive()
2039 pm_runtime_get(&sw->dev); in tb_enter_redrive()
2046 struct tb_switch *sw = port->sw; in tb_exit_redrive() local
2048 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_exit_redrive()
2053 if (tb_route(sw)) in tb_exit_redrive()
2055 if (port->redrive && tb_switch_query_dp_resource(sw, port)) { in tb_exit_redrive()
2057 pm_runtime_put(&sw->dev); in tb_exit_redrive()
2137 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) in tb_disconnect_pci() argument
2142 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); in tb_disconnect_pci()
2150 tb_switch_xhci_disconnect(sw); in tb_disconnect_pci()
2158 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) in tb_tunnel_pci() argument
2164 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); in tb_tunnel_pci()
2172 port = tb_switch_downstream_port(sw); in tb_tunnel_pci()
2173 down = tb_find_pcie_down(tb_switch_parent(sw), port); in tb_tunnel_pci()
2192 if (tb_switch_pcie_l1_enable(sw)) in tb_tunnel_pci()
2193 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); in tb_tunnel_pci()
2195 if (tb_switch_xhci_connect(sw)) in tb_tunnel_pci()
2196 tb_sw_warn(sw, "failed to connect xHCI\n"); in tb_tunnel_pci()
2209 struct tb_switch *sw; in tb_approve_xdomain_paths() local
2212 sw = tb_to_switch(xd->dev.parent); in tb_approve_xdomain_paths()
2213 dst_port = tb_port_at(xd->route, sw); in tb_approve_xdomain_paths()
2222 tb_disable_clx(sw); in tb_approve_xdomain_paths()
2245 tb_enable_clx(sw); in tb_approve_xdomain_paths()
2258 struct tb_switch *sw; in __tb_disconnect_xdomain_paths() local
2260 sw = tb_to_switch(xd->dev.parent); in __tb_disconnect_xdomain_paths()
2261 dst_port = tb_port_at(xd->route, sw); in __tb_disconnect_xdomain_paths()
2280 tb_enable_clx(sw); in __tb_disconnect_xdomain_paths()
2309 struct tb_switch *sw; in tb_handle_hotplug() local
2319 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_hotplug()
2320 if (!sw) { in tb_handle_hotplug()
2326 if (ev->port > sw->config.max_port_number) { in tb_handle_hotplug()
2332 port = &sw->ports[ev->port]; in tb_handle_hotplug()
2339 pm_runtime_get_sync(&sw->dev); in tb_handle_hotplug()
2346 tb_sw_set_unplugged(port->remote->sw); in tb_handle_hotplug()
2348 tb_remove_dp_resources(port->remote->sw); in tb_handle_hotplug()
2349 tb_switch_tmu_disable(port->remote->sw); in tb_handle_hotplug()
2350 tb_switch_unconfigure_link(port->remote->sw); in tb_handle_hotplug()
2351 tb_switch_set_link_width(port->remote->sw, in tb_handle_hotplug()
2353 tb_switch_remove(port->remote->sw); in tb_handle_hotplug()
2380 tb_sw_dbg(sw, "xHCI disconnect request\n"); in tb_handle_hotplug()
2381 tb_switch_xhci_disconnect(sw); in tb_handle_hotplug()
2388 } else if (!port->port && sw->authorized) { in tb_handle_hotplug()
2389 tb_sw_dbg(sw, "xHCI connect request\n"); in tb_handle_hotplug()
2390 tb_switch_xhci_connect(sw); in tb_handle_hotplug()
2402 pm_runtime_mark_last_busy(&sw->dev); in tb_handle_hotplug()
2403 pm_runtime_put_autosuspend(&sw->dev); in tb_handle_hotplug()
2406 tb_switch_put(sw); in tb_handle_hotplug()
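tb_handle_hotplug() resolves the event's route to a switch with tb_switch_find_by_route(), which returns a reference that must be dropped with tb_switch_put(), and validates the port number against sw->config.max_port_number before indexing sw->ports[]. A hedged sketch of that lookup shape; handle_port() and the parameters are illustrative, not the driver's event structure:

#include "tb.h"	/* struct tb, struct tb_switch, tb_switch_find_by_route(), tb_switch_put() */

/* handle_port() is a hypothetical per-port handler standing in for the
 * plug/unplug logic inside tb_handle_hotplug(). */
static void handle_route_event(struct tb *tb, u64 route, unsigned int port_nr,
			       void (*handle_port)(struct tb_port *port))
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);	/* takes a reference */
	if (!sw)
		return;					/* switch already gone */

	if (port_nr > sw->config.max_port_number)
		goto put;				/* bogus port number in the event */

	handle_port(&sw->ports[port_nr]);
put:
	tb_switch_put(sw);				/* drop the lookup reference */
}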
2616 struct tb_switch *sw; in tb_handle_dp_bandwidth_request() local
2625 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_dp_bandwidth_request()
2626 if (!sw) { in tb_handle_dp_bandwidth_request()
2632 in = &sw->ports[ev->port]; in tb_handle_dp_bandwidth_request()
2707 tb_switch_put(sw); in tb_handle_dp_bandwidth_request()
2823 struct tb_switch *sw = tb_to_switch(dev); in tb_scan_finalize_switch() local
2830 if (sw->boot) in tb_scan_finalize_switch()
2831 sw->authorized = 1; in tb_scan_finalize_switch()
2934 static void tb_restore_children(struct tb_switch *sw) in tb_restore_children() argument
2939 if (sw->is_unplugged) in tb_restore_children()
2942 if (tb_enable_clx(sw)) in tb_restore_children()
2943 tb_sw_warn(sw, "failed to re-enable CL states\n"); in tb_restore_children()
2945 if (tb_enable_tmu(sw)) in tb_restore_children()
2946 tb_sw_warn(sw, "failed to restore TMU configuration\n"); in tb_restore_children()
2948 tb_switch_configuration_valid(sw); in tb_restore_children()
2950 tb_switch_for_each_port(sw, port) { in tb_restore_children()
2955 tb_switch_set_link_width(port->remote->sw, in tb_restore_children()
2956 port->remote->sw->link_width); in tb_restore_children()
2957 tb_switch_configure_link(port->remote->sw); in tb_restore_children()
2959 tb_restore_children(port->remote->sw); in tb_restore_children()
3026 static int tb_free_unplugged_xdomains(struct tb_switch *sw) in tb_free_unplugged_xdomains() argument
3031 tb_switch_for_each_port(sw, port) { in tb_free_unplugged_xdomains()
3041 ret += tb_free_unplugged_xdomains(port->remote->sw); in tb_free_unplugged_xdomains()