1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - bus logic (NHI independent)
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
19 #define TB_TIMEOUT 100 /* ms */
22 * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
23 * direction. This is 40G minus the 10% guard band, i.e. 36000 Mb/s.
25 #define TB_ASYM_MIN (40000 * 90 / 100)
28 * Threshold bandwidth (in Mb/s) that is used to switch the links to
29 * asymmetric and back. This is selected as 45G which means when the
30 * request is higher than this, we switch the link to asymmetric, and
31 * when it is less than this we switch it back. The 45G is selected so
32 * that we still have 27G (of the total 72G) for bulk PCIe traffic when
33 * switching back to symmetric.
35 #define TB_ASYM_THRESHOLD 45000
37 #define MAX_GROUPS 7 /* max Group_ID is 7 */
39 static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
40 module_param_named(asym_threshold, asym_threshold, uint, 0444);
41 MODULE_PARM_DESC(asym_threshold,
42 "threshold (Mb/s) when to Gen 4 switch link symmetry. 0 disables. (default: "
43 __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
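/*
 * Worked example of the numbers above (a sketch, assuming a Gen 4 link
 * running at 40 Gb/s per lane): symmetric operation gives 2 x 40G = 80G
 * raw, or 72G after the 10% guard band. A request above asym_threshold
 * (45G) switches the link to asymmetric (3 x 40G = 120G raw, 108G
 * usable in the heavy direction); once the request drops back below
 * 45G the link is switched to symmetric again, leaving 72G - 45G = 27G
 * for bulk PCIe traffic.
 */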
46 * struct tb_cm - Simple Thunderbolt connection manager
47 * @tunnel_list: List of active tunnels
48 * @dp_resources: List of available DP resources for DP tunneling
49 * @hotplug_active: tb_handle_hotplug will stop progressing plug
50 * events and exit if this is not set (it needs to
51 * acquire the lock one more time). Used to drain wq
52 * after cfg has been paused.
53 * @remove_work: Work used to remove any unplugged routers after
55 * @groups: Bandwidth groups used in this domain.
58 struct list_head tunnel_list;
59 struct list_head dp_resources;
61 struct delayed_work remove_work;
62 struct tb_bandwidth_group groups[MAX_GROUPS];
65 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
67 return ((void *)tcm - sizeof(struct tb));
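/*
 * The pointer arithmetic above relies on the connection manager private
 * data being allocated immediately after struct tb in the same
 * allocation. A minimal sketch of the relationship, assuming the domain
 * is created the way tb_probe() does it:
 *
 *	struct tb *tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(struct tb_cm));
 *	struct tb_cm *tcm = tb_priv(tb);	// == (void *)tb + sizeof(*tb)
 *
 *	WARN_ON(tcm_to_tb(tcm) != tb);		// recovers the domain again
 */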
70 struct tb_hotplug_event {
71 struct work_struct work;
78 static void tb_init_bandwidth_groups(struct tb_cm *tcm)
82 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
83 struct tb_bandwidth_group *group = &tcm->groups[i];
85 group->tb = tcm_to_tb(tcm);
87 INIT_LIST_HEAD(&group->ports);
91 static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
94 if (!group || WARN_ON(in->group))
98 list_add_tail(&in->group_list, &group->ports);
100 tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
103 static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
107 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
108 struct tb_bandwidth_group *group = &tcm->groups[i];
110 if (list_empty(&group->ports))
117 static struct tb_bandwidth_group *
118 tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
121 struct tb_bandwidth_group *group;
122 struct tb_tunnel *tunnel;
125 * Find all DP tunnels that go through all the same USB4 links
126 * as this one. Because we always set up tunnels the same way, we
127 * can just check for the routers at both ends of the tunnels
128 * and if they are the same we have a match.
130 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
131 if (!tb_tunnel_is_dp(tunnel))
134 if (tunnel->src_port->sw == in->sw &&
135 tunnel->dst_port->sw == out->sw) {
136 group = tunnel->src_port->group;
138 tb_bandwidth_group_attach_port(group, in);
144 /* Otherwise pick the next available group */
145 group = tb_find_free_bandwidth_group(tcm);
147 tb_bandwidth_group_attach_port(group, in);
149 tb_port_warn(in, "no available bandwidth groups\n");
154 static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
157 if (usb4_dp_port_bandwidth_mode_enabled(in)) {
160 index = usb4_dp_port_group_id(in);
161 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
162 if (tcm->groups[i].index == index) {
163 tb_bandwidth_group_attach_port(&tcm->groups[i], in);
169 tb_attach_bandwidth_group(tcm, in, out);
172 static void tb_detach_bandwidth_group(struct tb_port *in)
174 struct tb_bandwidth_group *group = in->group;
178 list_del_init(&in->group_list);
180 tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
184 static void tb_handle_hotplug(struct work_struct *work);
186 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
188 struct tb_hotplug_event *ev;
190 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
198 INIT_WORK(&ev->work, tb_handle_hotplug);
199 queue_work(tb->wq, &ev->work);
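/*
 * A typical call, as made from the NHI event handling path when a plug
 * event arrives for a port (illustrative):
 *
 *	tb_queue_hotplug(tb, route, port, true);	// unplug on @port
 *
 * tb_handle_hotplug() then runs later on tb->wq and takes tb->lock
 * before touching the topology.
 */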
202 /* enumeration & hot plug handling */
204 static void tb_add_dp_resources(struct tb_switch *sw)
206 struct tb_cm *tcm = tb_priv(sw->tb);
207 struct tb_port *port;
209 tb_switch_for_each_port(sw, port) {
210 if (!tb_port_is_dpin(port))
213 if (!tb_switch_query_dp_resource(sw, port))
217 * If a DP IN on a device router exists, position it at the
218 * beginning of the DP resources list, so that it is used
219 * before DP IN of the host router. This way external GPU(s)
220 * will be prioritized when pairing DP IN to a DP OUT.
223 list_add(&port->list, &tcm->dp_resources);
225 list_add_tail(&port->list, &tcm->dp_resources);
227 tb_port_dbg(port, "DP IN resource available\n");
231 static void tb_remove_dp_resources(struct tb_switch *sw)
233 struct tb_cm *tcm = tb_priv(sw->tb);
234 struct tb_port *port, *tmp;
236 /* Clear children resources first */
237 tb_switch_for_each_port(sw, port) {
238 if (tb_port_has_remote(port))
239 tb_remove_dp_resources(port->remote->sw);
242 list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
243 if (port->sw == sw) {
244 tb_port_dbg(port, "DP OUT resource unavailable\n");
245 list_del_init(&port->list);
250 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
252 struct tb_cm *tcm = tb_priv(tb);
255 list_for_each_entry(p, &tcm->dp_resources, list) {
260 tb_port_dbg(port, "DP %s resource available discovered\n",
261 tb_port_is_dpin(port) ? "IN" : "OUT");
262 list_add_tail(&port->list, &tcm->dp_resources);
265 static void tb_discover_dp_resources(struct tb *tb)
267 struct tb_cm *tcm = tb_priv(tb);
268 struct tb_tunnel *tunnel;
270 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
271 if (tb_tunnel_is_dp(tunnel))
272 tb_discover_dp_resource(tb, tunnel->dst_port);
276 /* Enables CL states up to host router */
277 static int tb_enable_clx(struct tb_switch *sw)
279 struct tb_cm *tcm = tb_priv(sw->tb);
280 unsigned int clx = TB_CL0S | TB_CL1;
281 const struct tb_tunnel *tunnel;
285 * Currently only enable CLx for the first link. This is enough
286 * to allow the CPU to save energy at least on Intel hardware
287 * and makes it slightly simpler to implement. We may change
288 * this in the future to cover the whole topology if it turns
289 * out to be beneficial.
291 while (sw && tb_switch_depth(sw) > 1)
292 sw = tb_switch_parent(sw);
297 if (tb_switch_depth(sw) != 1)
301 * If we are re-enabling then check if there is an active DMA
302 * tunnel and in that case bail out.
304 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
305 if (tb_tunnel_is_dma(tunnel)) {
306 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
312 * Initially try with CL2. If that's not supported by the
313 * topology try with CL0s and CL1 and then give up.
315 ret = tb_switch_clx_enable(sw, clx | TB_CL2);
316 if (ret == -EOPNOTSUPP)
317 ret = tb_switch_clx_enable(sw, clx);
318 return ret == -EOPNOTSUPP ? 0 : ret;
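/*
 * Note that CLx is opportunistic here: -EOPNOTSUPP from both attempts
 * above is treated as success because not every topology (or both ends
 * of a link) supports CL2 or even CL0s/CL1.
 */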
322 * tb_disable_clx() - Disable CL states up to host router
323 * @sw: Router to start
325 * Disables CL states from @sw up to the host router. Returns true if
326 * any CL states were disabled. This can be used to figure out whether
327 * the link was setup by us or the boot firmware so we don't
328 * accidentally enable them if they were not enabled during discovery.
330 static bool tb_disable_clx(struct tb_switch *sw)
332 bool disabled = false;
337 ret = tb_switch_clx_disable(sw);
341 tb_sw_warn(sw, "failed to disable CL states\n");
343 sw = tb_switch_parent(sw);
349 static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
351 struct tb_switch *sw;
353 sw = tb_to_switch(dev);
357 if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
358 enum tb_switch_tmu_mode mode;
361 if (tb_switch_clx_is_enabled(sw, TB_CL1))
362 mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
364 mode = TB_SWITCH_TMU_MODE_HIFI_BI;
366 ret = tb_switch_tmu_configure(sw, mode);
370 return tb_switch_tmu_enable(sw);
376 static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
378 struct tb_switch *sw;
384 * Once the first DP tunnel is established we change the TMU
385 * accuracy of first depth child routers (and the host router)
386 * to the highest. This is needed for the DP tunneling to work
387 * but also allows CL0s.
389 * If both routers are v2 then we don't need to do anything as
390 * they are using enhanced TMU mode that allows all CLx.
392 sw = tunnel->tb->root_switch;
393 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
396 static int tb_enable_tmu(struct tb_switch *sw)
401 * If both routers at the ends of the link are v2 we simply
402 * enable the enhanced uni-directional mode. That covers all
403 * the CL states. For v1 and before we need to use the normal
404 * rate to allow CL1 (when supported). Otherwise we keep the TMU
405 * running at the highest accuracy.
407 ret = tb_switch_tmu_configure(sw,
408 TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
409 if (ret == -EOPNOTSUPP) {
410 if (tb_switch_clx_is_enabled(sw, TB_CL1))
411 ret = tb_switch_tmu_configure(sw,
412 TB_SWITCH_TMU_MODE_LOWRES);
414 ret = tb_switch_tmu_configure(sw,
415 TB_SWITCH_TMU_MODE_HIFI_BI);
420 /* If it is already enabled in correct mode, don't touch it */
421 if (tb_switch_tmu_is_enabled(sw))
424 ret = tb_switch_tmu_disable(sw);
428 ret = tb_switch_tmu_post_time(sw);
432 return tb_switch_tmu_enable(sw);
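/*
 * The mode selection in tb_enable_tmu() above, summarized
 * (illustrative):
 *
 *	both link ends v2:	TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI
 *	older, CL1 enabled:	TB_SWITCH_TMU_MODE_LOWRES
 *	older, CL1 disabled:	TB_SWITCH_TMU_MODE_HIFI_BI
 */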
435 static void tb_switch_discover_tunnels(struct tb_switch *sw,
436 struct list_head *list,
439 struct tb *tb = sw->tb;
440 struct tb_port *port;
442 tb_switch_for_each_port(sw, port) {
443 struct tb_tunnel *tunnel = NULL;
445 switch (port->config.type) {
446 case TB_TYPE_DP_HDMI_IN:
447 tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
448 tb_increase_tmu_accuracy(tunnel);
451 case TB_TYPE_PCIE_DOWN:
452 tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
455 case TB_TYPE_USB3_DOWN:
456 tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
464 list_add_tail(&tunnel->list, list);
467 tb_switch_for_each_port(sw, port) {
468 if (tb_port_has_remote(port)) {
469 tb_switch_discover_tunnels(port->remote->sw, list,
475 static void tb_discover_tunnels(struct tb *tb)
477 struct tb_cm *tcm = tb_priv(tb);
478 struct tb_tunnel *tunnel;
480 tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
482 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
483 if (tb_tunnel_is_pci(tunnel)) {
484 struct tb_switch *parent = tunnel->dst_port->sw;
486 while (parent != tunnel->src_port->sw) {
488 parent = tb_switch_parent(parent);
490 } else if (tb_tunnel_is_dp(tunnel)) {
491 struct tb_port *in = tunnel->src_port;
492 struct tb_port *out = tunnel->dst_port;
494 /* Keep the domain from powering down */
495 pm_runtime_get_sync(&in->sw->dev);
496 pm_runtime_get_sync(&out->sw->dev);
498 tb_discover_bandwidth_group(tcm, in, out);
503 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
505 if (tb_switch_is_usb4(port->sw))
506 return usb4_port_configure_xdomain(port, xd);
507 return tb_lc_configure_xdomain(port);
510 static void tb_port_unconfigure_xdomain(struct tb_port *port)
512 if (tb_switch_is_usb4(port->sw))
513 usb4_port_unconfigure_xdomain(port);
515 tb_lc_unconfigure_xdomain(port);
518 static void tb_scan_xdomain(struct tb_port *port)
520 struct tb_switch *sw = port->sw;
521 struct tb *tb = sw->tb;
522 struct tb_xdomain *xd;
525 if (!tb_is_xdomain_enabled())
528 route = tb_downstream_route(port);
529 xd = tb_xdomain_find_by_route(tb, route);
535 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
538 tb_port_at(route, sw)->xdomain = xd;
539 tb_port_configure_xdomain(port, xd);
545 * tb_find_unused_port() - return the first inactive port on @sw
546 * @sw: Switch to find the port on
547 * @type: Port type to look for
549 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
550 enum tb_port_type type)
552 struct tb_port *port;
554 tb_switch_for_each_port(sw, port) {
555 if (tb_is_upstream_port(port))
557 if (port->config.type != type)
561 if (tb_port_is_enabled(port))
568 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
569 const struct tb_port *port)
571 struct tb_port *down;
573 down = usb4_switch_map_usb3_down(sw, port);
574 if (down && !tb_usb3_port_is_enabled(down))
579 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
580 struct tb_port *src_port,
581 struct tb_port *dst_port)
583 struct tb_cm *tcm = tb_priv(tb);
584 struct tb_tunnel *tunnel;
586 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
587 if (tunnel->type == type &&
588 ((src_port && src_port == tunnel->src_port) ||
589 (dst_port && dst_port == tunnel->dst_port))) {
597 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
598 struct tb_port *src_port,
599 struct tb_port *dst_port)
601 struct tb_port *port, *usb3_down;
602 struct tb_switch *sw;
604 /* Pick the router that is deepest in the topology */
605 if (tb_port_path_direction_downstream(src_port, dst_port))
610 /* Can't be the host router */
611 if (sw == tb->root_switch)
614 /* Find the downstream USB4 port that leads to this router */
615 port = tb_port_at(tb_route(sw), tb->root_switch);
616 /* Find the corresponding host router USB3 downstream port */
617 usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
621 return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
625 * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
626 * @tb: Domain structure
627 * @src_port: Source protocol adapter
628 * @dst_port: Destination protocol adapter
629 * @port: USB4 port for which the consumed bandwidth is calculated
630 * @consumed_up: Consumed upstream bandwidth (Mb/s)
631 * @consumed_down: Consumed downstream bandwidth (Mb/s)
633 * Calculates consumed USB3 and PCIe bandwidth at @port on the path
634 * from @src_port to @dst_port. Does not take the tunnel starting from
635 * @src_port and ending at @dst_port into account.
637 static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
638 struct tb_port *src_port,
639 struct tb_port *dst_port,
640 struct tb_port *port,
644 int pci_consumed_up, pci_consumed_down;
645 struct tb_tunnel *tunnel;
647 *consumed_up = *consumed_down = 0;
649 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
650 if (tunnel && tunnel->src_port != src_port &&
651 tunnel->dst_port != dst_port) {
654 ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
661 * If there is anything reserved for PCIe bulk traffic take it
662 * into account here too.
664 if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
665 *consumed_up += pci_consumed_up;
666 *consumed_down += pci_consumed_down;
673 * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
674 * @tb: Domain structure
675 * @src_port: Source protocol adapter
676 * @dst_port: Destination protocol adapter
677 * @port: USB4 port for which the consumed bandwidth is calculated
678 * @consumed_up: Consumed upstream bandwidth (Mb/s)
679 * @consumed_down: Consumed downstream bandwidth (Mb/s)
681 * Calculates consumed DP bandwidth at @port on the path from @src_port
682 * to @dst_port. Does not take the tunnel starting from @src_port and
683 * ending at @dst_port into account.
685 static int tb_consumed_dp_bandwidth(struct tb *tb,
686 struct tb_port *src_port,
687 struct tb_port *dst_port,
688 struct tb_port *port,
692 struct tb_cm *tcm = tb_priv(tb);
693 struct tb_tunnel *tunnel;
696 *consumed_up = *consumed_down = 0;
699 * Find all DP tunnels that cross the port and subtract
700 * their consumed bandwidth from the available bandwidth.
702 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
703 int dp_consumed_up, dp_consumed_down;
705 if (tb_tunnel_is_invalid(tunnel))
708 if (!tb_tunnel_is_dp(tunnel))
711 if (!tb_tunnel_port_on_path(tunnel, port))
715 * Ignore the DP tunnel between src_port and dst_port
716 * because it is the same tunnel and we may be
717 * re-calculating estimated bandwidth.
719 if (tunnel->src_port == src_port &&
720 tunnel->dst_port == dst_port)
723 ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
728 *consumed_up += dp_consumed_up;
729 *consumed_down += dp_consumed_down;
735 static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
736 struct tb_port *port)
738 bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
739 enum tb_link_width width;
741 if (tb_is_upstream_port(port))
742 width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
744 width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
746 return tb_port_width_supported(port, width);
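/*
 * In other words (illustrative): for a downstream path the
 * downstream-facing port of each link must support TB_LINK_WIDTH_ASYM_TX
 * and the upstream-facing port TB_LINK_WIDTH_ASYM_RX, so that three
 * lanes point towards @dst_port; for an upstream path the roles are
 * reversed.
 */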
750 * tb_maximum_bandwidth() - Maximum bandwidth over a single link
751 * @tb: Domain structure
752 * @src_port: Source protocol adapter
753 * @dst_port: Destination protocol adapter
754 * @port: USB4 port the total bandwidth is calculated
755 * @max_up: Maximum upstream bandwidth (Mb/s)
756 * @max_down: Maximum downstream bandwidth (Mb/s)
757 * @include_asym: Include bandwidth if the link is switched from
758 * symmetric to asymmetric
760 * Returns maximum possible bandwidth in @max_up and @max_down over a
761 * single link at @port. If @include_asym is set then includes the
762 * additional bandwidth if the links are transitioned into asymmetric in
763 * the direction from @src_port to @dst_port.
765 static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
766 struct tb_port *dst_port, struct tb_port *port,
767 int *max_up, int *max_down, bool include_asym)
769 bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
770 int link_speed, link_width, up_bw, down_bw;
773 * Can include asymmetric, only if it is actually supported by
776 if (!tb_asym_supported(src_port, dst_port, port))
777 include_asym = false;
779 if (tb_is_upstream_port(port)) {
780 link_speed = port->sw->link_speed;
782 * sw->link_width is from upstream perspective so we use
783 * the opposite for downstream of the host router.
785 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
786 up_bw = link_speed * 3 * 1000;
787 down_bw = link_speed * 1 * 1000;
788 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
789 up_bw = link_speed * 1 * 1000;
790 down_bw = link_speed * 3 * 1000;
791 } else if (include_asym) {
793 * The link is symmetric at the moment but we
794 * can switch it to asymmetric as needed. Report
795 * this bandwidth as available (even though it
796 * is not yet enabled).
799 up_bw = link_speed * 1 * 1000;
800 down_bw = link_speed * 3 * 1000;
802 up_bw = link_speed * 3 * 1000;
803 down_bw = link_speed * 1 * 1000;
806 up_bw = link_speed * port->sw->link_width * 1000;
810 link_speed = tb_port_get_link_speed(port);
814 link_width = tb_port_get_link_width(port);
818 if (link_width == TB_LINK_WIDTH_ASYM_TX) {
819 up_bw = link_speed * 1 * 1000;
820 down_bw = link_speed * 3 * 1000;
821 } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
822 up_bw = link_speed * 3 * 1000;
823 down_bw = link_speed * 1 * 1000;
824 } else if (include_asym) {
826 * The link is symmetric at the moment but we
827 * can switch it to asymmetric as needed. Report
828 * this bandwidth as available (even though it
829 * is not yet enabled).
832 up_bw = link_speed * 1 * 1000;
833 down_bw = link_speed * 3 * 1000;
835 up_bw = link_speed * 3 * 1000;
836 down_bw = link_speed * 1 * 1000;
839 up_bw = link_speed * link_width * 1000;
844 /* Leave 10% guard band */
845 *max_up = up_bw - up_bw / 10;
846 *max_down = down_bw - down_bw / 10;
848 tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
853 * tb_available_bandwidth() - Available bandwidth for tunneling
854 * @tb: Domain structure
855 * @src_port: Source protocol adapter
856 * @dst_port: Destination protocol adapter
857 * @available_up: Available bandwidth upstream (Mb/s)
858 * @available_down: Available bandwidth downstream (Mb/s)
859 * @include_asym: Include bandwidth if the link is switched from
860 * symmetric to asymmetric
862 * Calculates maximum available bandwidth for protocol tunneling between
863 * @src_port and @dst_port at the moment. This is the minimum, over
864 * all links on the path, of the maximum link bandwidth minus the
865 * bandwidth currently consumed on that link.
867 * If @include_asym is true then the bandwidth that can be added by
868 * transitioning the links into asymmetric is also included (the links
869 * are not actually transitioned).
871 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
872 struct tb_port *dst_port, int *available_up,
873 int *available_down, bool include_asym)
875 struct tb_port *port;
878 /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
879 *available_up = *available_down = 120000;
881 /* Find the minimum available bandwidth over all links */
882 tb_for_each_port_on_path(src_port, dst_port, port) {
883 int max_up, max_down, consumed_up, consumed_down;
885 if (!tb_port_is_null(port))
888 ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
889 &max_up, &max_down, include_asym);
893 ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
898 max_up -= consumed_up;
899 max_down -= consumed_down;
901 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
902 &consumed_up, &consumed_down);
905 max_up -= consumed_up;
906 max_down -= consumed_down;
908 if (max_up < *available_up)
909 *available_up = max_up;
910 if (max_down < *available_down)
911 *available_down = max_down;
914 if (*available_up < 0)
916 if (*available_down < 0)
922 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
923 struct tb_port *src_port,
924 struct tb_port *dst_port)
926 struct tb_tunnel *tunnel;
928 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
929 return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
932 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
933 struct tb_port *dst_port)
935 int ret, available_up, available_down;
936 struct tb_tunnel *tunnel;
938 tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
942 tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
945 * Calculate available bandwidth for the first hop USB3 tunnel.
946 * That determines the whole USB3 bandwidth for this branch.
948 ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
949 &available_up, &available_down, false);
951 tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
955 tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
958 tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
961 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
963 struct tb_switch *parent = tb_switch_parent(sw);
964 int ret, available_up, available_down;
965 struct tb_port *up, *down, *port;
966 struct tb_cm *tcm = tb_priv(tb);
967 struct tb_tunnel *tunnel;
969 if (!tb_acpi_may_tunnel_usb3()) {
970 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
974 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
982 * Look up an available down port. Since we are chaining, it should
983 * be found right above this switch.
985 port = tb_switch_downstream_port(sw);
986 down = tb_find_usb3_down(parent, port);
990 if (tb_route(parent)) {
991 struct tb_port *parent_up;
993 * Check first that the parent switch has its upstream USB3
994 * port enabled. Otherwise the chain is not complete and
995 * there is no point setting up a new tunnel.
997 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
998 if (!parent_up || !tb_port_is_enabled(parent_up))
1001 /* Make all unused bandwidth available for the new tunnel */
1002 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
1007 ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
1012 tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
1013 available_up, available_down);
1015 tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
1022 if (tb_tunnel_activate(tunnel)) {
1024 "USB3 tunnel activation failed, aborting\n");
1029 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1030 if (tb_route(parent))
1031 tb_reclaim_usb3_bandwidth(tb, down, up);
1036 tb_tunnel_free(tunnel);
1038 if (tb_route(parent))
1039 tb_reclaim_usb3_bandwidth(tb, down, up);
1044 static int tb_create_usb3_tunnels(struct tb_switch *sw)
1046 struct tb_port *port;
1049 if (!tb_acpi_may_tunnel_usb3())
1053 ret = tb_tunnel_usb3(sw->tb, sw);
1058 tb_switch_for_each_port(sw, port) {
1059 if (!tb_port_has_remote(port))
1061 ret = tb_create_usb3_tunnels(port->remote->sw);
1070 * tb_configure_asym() - Transition links to asymmetric if needed
1071 * @tb: Domain structure
1072 * @src_port: Source adapter to start the transition
1073 * @dst_port: Destination adapter
1074 * @requested_up: Additional bandwidth (Mb/s) required upstream
1075 * @requested_down: Additional bandwidth (Mb/s) required downstream
1077 * Transition links between @src_port and @dst_port into asymmetric, with
1078 * three lanes in the direction from @src_port towards @dst_port and one lane
1079 * in the opposite direction, if the bandwidth requirements
1080 * (requested + currently consumed) on that link exceed @asym_threshold.
1082 * Must be called with available >= requested over all links.
1084 static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
1085 struct tb_port *dst_port, int requested_up,
1088 bool clx = false, clx_disabled = false, downstream;
1089 struct tb_switch *sw;
1093 if (!asym_threshold)
1096 downstream = tb_port_path_direction_downstream(src_port, dst_port);
1097 /* Pick the router deepest in the hierarchy */
1103 tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1104 struct tb_port *down = tb_switch_downstream_port(up->sw);
1105 enum tb_link_width width_up, width_down;
1106 int consumed_up, consumed_down;
1108 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1109 &consumed_up, &consumed_down);
1115 * Downstream, so make sure the upstream direction stays
1116 * within 36G (40G minus the 10% guard band) and that the
1117 * request exceeds the threshold.
1119 if (consumed_up + requested_up >= TB_ASYM_MIN) {
1123 /* Does consumed + requested exceed the threshold? */
1124 if (consumed_down + requested_down < asym_threshold)
1127 width_up = TB_LINK_WIDTH_ASYM_RX;
1128 width_down = TB_LINK_WIDTH_ASYM_TX;
1130 /* Upstream, the opposite of above */
1131 if (consumed_down + requested_down >= TB_ASYM_MIN) {
1135 if (consumed_up + requested_up < asym_threshold)
1138 width_up = TB_LINK_WIDTH_ASYM_TX;
1139 width_down = TB_LINK_WIDTH_ASYM_RX;
1142 if (up->sw->link_width == width_up)
1145 if (!tb_port_width_supported(up, width_up) ||
1146 !tb_port_width_supported(down, width_down))
1150 * Disable CL states before doing any transitions. We
1151 * delayed it until now, when we know that a real
1152 * transition is taking place.
1154 if (!clx_disabled) {
1155 clx = tb_disable_clx(sw);
1156 clx_disabled = true;
1159 tb_sw_dbg(up->sw, "configuring asymmetric link\n");
1162 * Here requested + consumed > threshold so we need to
1163 * transition the link into asymmetric now.
1165 ret = tb_switch_set_link_width(up->sw, width_up);
1167 tb_sw_warn(up->sw, "failed to set link width\n");
1172 /* Re-enable CL states if they were previously enabled */
1180 * tb_configure_sym() - Transition links to symmetric if possible
1181 * @tb: Domain structure
1182 * @src_port: Source adapter to start the transition
1183 * @dst_port: Destination adapter
1184 * @requested_up: New lower bandwidth request upstream (Mb/s)
1185 * @requested_down: New lower bandwidth request downstream (Mb/s)
1186 * @keep_asym: Keep asymmetric link if preferred
1188 * Goes over each link from @src_port to @dst_port and tries to
1189 * transition the link to symmetric if the currently consumed bandwidth
1190 * allows it. A router's preference for an asymmetric link is honored unless @keep_asym is %false.
1192 static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
1193 struct tb_port *dst_port, int requested_up,
1194 int requested_down, bool keep_asym)
1196 bool clx = false, clx_disabled = false, downstream;
1197 struct tb_switch *sw;
1201 if (!asym_threshold)
1204 downstream = tb_port_path_direction_downstream(src_port, dst_port);
1205 /* Pick the router deepest in the hierarchy */
1211 tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
1212 int consumed_up, consumed_down;
1214 /* Already symmetric */
1215 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
1217 /* Unplugged, no need to switch */
1218 if (up->sw->is_unplugged)
1221 ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
1222 &consumed_up, &consumed_down);
1228 * Downstream, so we want consumed_down < threshold.
1229 * Upstream traffic should be less than 36G (40G minus
1230 * the 10% guard band) as the link was configured asymmetric
1233 if (consumed_down + requested_down >= asym_threshold)
1236 if (consumed_up + requested_up >= asym_threshold)
1240 if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
1244 * Here consumed < threshold so we can transition the
1245 * link to symmetric.
1247 * However, if the router prefers asymmetric link we
1248 * honor that (unless @keep_asym is %false).
1251 up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
1252 tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
1256 /* Disable CL states before doing any transitions */
1257 if (!clx_disabled) {
1258 clx = tb_disable_clx(sw);
1259 clx_disabled = true;
1262 tb_sw_dbg(up->sw, "configuring symmetric link\n");
1264 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
1266 tb_sw_warn(up->sw, "failed to set link width\n");
1271 /* Re-enable CL states if they were previously enabled */
1278 static void tb_configure_link(struct tb_port *down, struct tb_port *up,
1279 struct tb_switch *sw)
1281 struct tb *tb = sw->tb;
1283 /* Link the routers using both links if available */
1286 if (down->dual_link_port && up->dual_link_port) {
1287 down->dual_link_port->remote = up->dual_link_port;
1288 up->dual_link_port->remote = down->dual_link_port;
1292 * Enable lane bonding if the link is currently two single lane
1295 if (sw->link_width < TB_LINK_WIDTH_DUAL)
1296 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
1299 * If a device router that comes up with a symmetric link is
1300 * connected deeper in the hierarchy, we transition the links
1301 * above it into symmetric if bandwidth allows.
1303 if (tb_switch_depth(sw) > 1 &&
1304 tb_port_get_link_generation(up) >= 4 &&
1305 up->sw->link_width == TB_LINK_WIDTH_DUAL) {
1306 struct tb_port *host_port;
1308 host_port = tb_port_at(tb_route(sw), tb->root_switch);
1309 tb_configure_sym(tb, host_port, up, 0, 0, false);
1312 /* Set the link configured */
1313 tb_switch_configure_link(sw);
1316 static void tb_scan_port(struct tb_port *port);
1319 * tb_scan_switch() - scan for and initialize downstream switches
1321 static void tb_scan_switch(struct tb_switch *sw)
1323 struct tb_port *port;
1325 pm_runtime_get_sync(&sw->dev);
1327 tb_switch_for_each_port(sw, port)
1330 pm_runtime_mark_last_busy(&sw->dev);
1331 pm_runtime_put_autosuspend(&sw->dev);
1335 * tb_scan_port() - check for and initialize switches below port
1337 static void tb_scan_port(struct tb_port *port)
1339 struct tb_cm *tcm = tb_priv(port->sw->tb);
1340 struct tb_port *upstream_port;
1341 bool discovery = false;
1342 struct tb_switch *sw;
1344 if (tb_is_upstream_port(port))
1347 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
1348 !tb_dp_port_is_enabled(port)) {
1349 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
1350 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
1355 if (port->config.type != TB_TYPE_PORT)
1357 if (port->dual_link_port && port->link_nr)
1359 * Downstream switch is reachable through two ports.
1360 * Only scan on the primary port (link_nr == 0).
1364 pm_runtime_get_sync(&port->usb4->dev);
1366 if (tb_wait_for_port(port, false) <= 0)
1369 tb_port_dbg(port, "port already has a remote\n");
1373 tb_retimer_scan(port, true);
1375 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
1376 tb_downstream_route(port));
1379 * If there is an error accessing the connected switch
1380 * it may be connected to another domain. Also we allow
1381 * the other domain to be connected to a max depth switch.
1383 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
1384 tb_scan_xdomain(port);
1388 if (tb_switch_configure(sw)) {
1394 * If there was previously another domain connected remove it
1397 if (port->xdomain) {
1398 tb_xdomain_remove(port->xdomain);
1399 tb_port_unconfigure_xdomain(port);
1400 port->xdomain = NULL;
1404 * Do not send uevents until we have discovered all existing
1405 * tunnels and know which switches were authorized already by
1406 * the boot firmware.
1408 if (!tcm->hotplug_active) {
1409 dev_set_uevent_suppress(&sw->dev, true);
1414 * At the moment we can support runtime PM only for Thunderbolt 2
1415 * and beyond (devices with an LC).
1417 sw->rpm = sw->generation > 1;
1419 if (tb_switch_add(sw)) {
1424 upstream_port = tb_upstream_port(sw);
1425 tb_configure_link(port, upstream_port, sw);
1428 * CL0s and CL1 are enabled and supported together.
1429 * Silently ignore CLx enabling in case CLx is not supported.
1432 tb_sw_dbg(sw, "discovery, not touching CL states\n");
1433 else if (tb_enable_clx(sw))
1434 tb_sw_warn(sw, "failed to enable CL states\n");
1436 if (tb_enable_tmu(sw))
1437 tb_sw_warn(sw, "failed to enable TMU\n");
1440 * Configuration valid needs to be set after the TMU has been
1441 * enabled for the upstream port of the router so we do it here.
1443 tb_switch_configuration_valid(sw);
1445 /* Scan upstream retimers */
1446 tb_retimer_scan(upstream_port, true);
1449 * Create USB 3.x tunnels only when the switch is plugged into the
1450 * domain. This is because we scan the domain also during discovery
1451 * and want to discover existing USB 3.x tunnels before we create
1454 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
1455 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
1457 tb_add_dp_resources(sw);
1462 pm_runtime_mark_last_busy(&port->usb4->dev);
1463 pm_runtime_put_autosuspend(&port->usb4->dev);
1467 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
1469 struct tb_port *src_port, *dst_port;
1475 tb_tunnel_deactivate(tunnel);
1476 list_del(&tunnel->list);
1479 src_port = tunnel->src_port;
1480 dst_port = tunnel->dst_port;
1482 switch (tunnel->type) {
1484 tb_detach_bandwidth_group(src_port);
1486 * In case of DP tunnel make sure the DP IN resource is
1487 * deallocated properly.
1489 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
1491 * If bandwidth on a link is < asym_threshold
1492 * transition the link to symmetric.
1494 tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
1495 /* Now we can allow the domain to runtime suspend again */
1496 pm_runtime_mark_last_busy(&dst_port->sw->dev);
1497 pm_runtime_put_autosuspend(&dst_port->sw->dev);
1498 pm_runtime_mark_last_busy(&src_port->sw->dev);
1499 pm_runtime_put_autosuspend(&src_port->sw->dev);
1502 case TB_TUNNEL_USB3:
1503 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
1508 * PCIe and DMA tunnels do not consume guaranteed
1514 tb_tunnel_free(tunnel);
1518 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
1520 static void tb_free_invalid_tunnels(struct tb *tb)
1522 struct tb_cm *tcm = tb_priv(tb);
1523 struct tb_tunnel *tunnel;
1524 struct tb_tunnel *n;
1526 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1527 if (tb_tunnel_is_invalid(tunnel))
1528 tb_deactivate_and_free_tunnel(tunnel);
1533 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
1535 static void tb_free_unplugged_children(struct tb_switch *sw)
1537 struct tb_port *port;
1539 tb_switch_for_each_port(sw, port) {
1540 if (!tb_port_has_remote(port))
1543 if (port->remote->sw->is_unplugged) {
1544 tb_retimer_remove_all(port);
1545 tb_remove_dp_resources(port->remote->sw);
1546 tb_switch_unconfigure_link(port->remote->sw);
1547 tb_switch_set_link_width(port->remote->sw,
1548 TB_LINK_WIDTH_SINGLE);
1549 tb_switch_remove(port->remote->sw);
1550 port->remote = NULL;
1551 if (port->dual_link_port)
1552 port->dual_link_port->remote = NULL;
1554 tb_free_unplugged_children(port->remote->sw);
1559 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
1560 const struct tb_port *port)
1562 struct tb_port *down = NULL;
1565 * To keep plugging devices consistently in the same PCIe
1566 * hierarchy, do mapping here for switch downstream PCIe ports.
1568 if (tb_switch_is_usb4(sw)) {
1569 down = usb4_switch_map_pcie_down(sw, port);
1570 } else if (!tb_route(sw)) {
1571 int phy_port = tb_phy_port_from_link(port->port);
1575 * Hard-coded Thunderbolt port to PCIe down port mapping
1578 if (tb_switch_is_cactus_ridge(sw) ||
1579 tb_switch_is_alpine_ridge(sw))
1580 index = !phy_port ? 6 : 7;
1581 else if (tb_switch_is_falcon_ridge(sw))
1582 index = !phy_port ? 6 : 8;
1583 else if (tb_switch_is_titan_ridge(sw))
1584 index = !phy_port ? 8 : 9;
1588 /* Validate the hard-coding */
1589 if (WARN_ON(index > sw->config.max_port_number))
1592 down = &sw->ports[index];
1596 if (WARN_ON(!tb_port_is_pcie_down(down)))
1598 if (tb_pci_port_is_enabled(down))
1605 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
1609 tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
1611 struct tb_tunnel *first_tunnel;
1612 struct tb *tb = group->tb;
1616 tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
1619 first_tunnel = NULL;
1620 list_for_each_entry(in, &group->ports, group_list) {
1621 int estimated_bw, estimated_up, estimated_down;
1622 struct tb_tunnel *tunnel;
1623 struct tb_port *out;
1625 if (!usb4_dp_port_bandwidth_mode_enabled(in))
1628 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
1629 if (WARN_ON(!tunnel))
1632 if (!first_tunnel) {
1634 * Since USB3 bandwidth is shared by all DP
1635 * tunnels under the host router USB4 port, even
1636 * if they do not begin from the host router, we
1637 * can release USB3 bandwidth just once and not
1638 * for each tunnel separately.
1640 first_tunnel = tunnel;
1641 ret = tb_release_unused_usb3_bandwidth(tb,
1642 first_tunnel->src_port, first_tunnel->dst_port);
1644 tb_tunnel_warn(tunnel,
1645 "failed to release unused bandwidth\n");
1650 out = tunnel->dst_port;
1651 ret = tb_available_bandwidth(tb, in, out, &estimated_up,
1652 &estimated_down, true);
1654 tb_tunnel_warn(tunnel,
1655 "failed to re-calculate estimated bandwidth\n");
1660 * Estimated bandwidth includes:
1661 * - already allocated bandwidth for the DP tunnel
1662 * - available bandwidth along the path
1663 * - bandwidth allocated for USB 3.x but not used.
1665 tb_tunnel_dbg(tunnel,
1666 "re-calculated estimated bandwidth %u/%u Mb/s\n",
1667 estimated_up, estimated_down);
1669 if (tb_port_path_direction_downstream(in, out))
1670 estimated_bw = estimated_down;
1672 estimated_bw = estimated_up;
1674 if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
1675 tb_tunnel_warn(tunnel,
1676 "failed to update estimated bandwidth\n");
1680 tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
1681 first_tunnel->dst_port);
1683 tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
1686 static void tb_recalc_estimated_bandwidth(struct tb *tb)
1688 struct tb_cm *tcm = tb_priv(tb);
1691 tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
1693 for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
1694 struct tb_bandwidth_group *group = &tcm->groups[i];
1696 if (!list_empty(&group->ports))
1697 tb_recalc_estimated_bandwidth_for_group(group);
1700 tb_dbg(tb, "bandwidth re-calculation done\n");
1703 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
1705 struct tb_port *host_port, *port;
1706 struct tb_cm *tcm = tb_priv(tb);
1708 host_port = tb_route(in->sw) ?
1709 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
1711 list_for_each_entry(port, &tcm->dp_resources, list) {
1712 if (!tb_port_is_dpout(port))
1715 if (tb_port_is_enabled(port)) {
1716 tb_port_dbg(port, "DP OUT in use\n");
1720 tb_port_dbg(port, "DP OUT available\n");
1723 * Keep the DP tunnel under the topology starting from
1724 * the same host router downstream port.
1726 if (host_port && tb_route(port->sw)) {
1729 p = tb_port_at(tb_route(port->sw), tb->root_switch);
1740 static bool tb_tunnel_one_dp(struct tb *tb)
1742 int available_up, available_down, ret, link_nr;
1743 struct tb_cm *tcm = tb_priv(tb);
1744 struct tb_port *port, *in, *out;
1745 int consumed_up, consumed_down;
1746 struct tb_tunnel *tunnel;
1749 * Find a pair of inactive DP IN and DP OUT adapters and then
1750 * establish a DP tunnel between them.
1752 tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
1756 list_for_each_entry(port, &tcm->dp_resources, list) {
1757 if (!tb_port_is_dpin(port))
1760 if (tb_port_is_enabled(port)) {
1761 tb_port_dbg(port, "DP IN in use\n");
1766 tb_port_dbg(in, "DP IN available\n");
1768 out = tb_find_dp_out(tb, port);
1774 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
1778 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
1783 * This is only applicable to links that are not bonded (so
1784 * when Thunderbolt 1 hardware is involved somewhere in the
1785 * topology). For these try to share the DP bandwidth between
1789 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
1790 if (tb_tunnel_is_dp(tunnel)) {
1797 * DP stream needs the domain to be active so runtime resume
1798 * both ends of the tunnel.
1800 * This should make the routers in the middle active as well
1801 * and keeps the domain from runtime suspending while the DP
1804 pm_runtime_get_sync(&in->sw->dev);
1805 pm_runtime_get_sync(&out->sw->dev);
1807 if (tb_switch_alloc_dp_resource(in->sw, in)) {
1808 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
1812 if (!tb_attach_bandwidth_group(tcm, in, out))
1813 goto err_dealloc_dp;
1815 /* Make all unused USB3 bandwidth available for the new DP tunnel */
1816 ret = tb_release_unused_usb3_bandwidth(tb, in, out);
1818 tb_warn(tb, "failed to release unused bandwidth\n");
1819 goto err_detach_group;
1822 ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
1825 goto err_reclaim_usb;
1827 tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1828 available_up, available_down);
1830 tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1833 tb_port_dbg(out, "could not allocate DP tunnel\n");
1834 goto err_reclaim_usb;
1837 if (tb_tunnel_activate(tunnel)) {
1838 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1842 list_add_tail(&tunnel->list, &tcm->tunnel_list);
1843 tb_reclaim_usb3_bandwidth(tb, in, out);
1846 * Transition the links to asymmetric if the consumption exceeds
1849 if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
1850 tb_configure_asym(tb, in, out, consumed_up, consumed_down);
1852 /* Update the domain with the new bandwidth estimation */
1853 tb_recalc_estimated_bandwidth(tb);
1856 * When a DP tunnel exists, change the TMU mode of the host
1857 * router's first-depth children to HiFi so that CL0s works.
1859 tb_increase_tmu_accuracy(tunnel);
1863 tb_tunnel_free(tunnel);
1865 tb_reclaim_usb3_bandwidth(tb, in, out);
1867 tb_detach_bandwidth_group(in);
1869 tb_switch_dealloc_dp_resource(in->sw, in);
1871 pm_runtime_mark_last_busy(&out->sw->dev);
1872 pm_runtime_put_autosuspend(&out->sw->dev);
1873 pm_runtime_mark_last_busy(&in->sw->dev);
1874 pm_runtime_put_autosuspend(&in->sw->dev);
1879 static void tb_tunnel_dp(struct tb *tb)
1881 if (!tb_acpi_may_tunnel_dp()) {
1882 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
1886 while (tb_tunnel_one_dp(tb))
1890 static void tb_enter_redrive(struct tb_port *port)
1892 struct tb_switch *sw = port->sw;
1894 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1898 * If we get hot-unplug for the DP IN port of the host router
1899 * and the DP resource is not available anymore it means there
1900 * is a monitor connected directly to the Type-C port and we are
1901 * in "redrive" mode. For this to work we cannot enter RTD3 so
1902 * we bump up the runtime PM reference count here.
1904 if (!tb_port_is_dpin(port))
1908 if (!tb_switch_query_dp_resource(sw, port)) {
1909 port->redrive = true;
1910 pm_runtime_get(&sw->dev);
1911 tb_port_dbg(port, "enter redrive mode, keeping powered\n");
1915 static void tb_exit_redrive(struct tb_port *port)
1917 struct tb_switch *sw = port->sw;
1919 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
1922 if (!tb_port_is_dpin(port))
1926 if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
1927 port->redrive = false;
1928 pm_runtime_put(&sw->dev);
1929 tb_port_dbg(port, "exit redrive mode\n");
1933 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1935 struct tb_port *in, *out;
1936 struct tb_tunnel *tunnel;
1938 if (tb_port_is_dpin(port)) {
1939 tb_port_dbg(port, "DP IN resource unavailable\n");
1943 tb_port_dbg(port, "DP OUT resource unavailable\n");
1948 tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1950 tb_deactivate_and_free_tunnel(tunnel);
1952 tb_enter_redrive(port);
1953 list_del_init(&port->list);
1956 * See if there is another DP OUT port that can be used to
1957 * create another tunnel.
1959 tb_recalc_estimated_bandwidth(tb);
1963 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1965 struct tb_cm *tcm = tb_priv(tb);
1968 if (tb_port_is_enabled(port))
1971 list_for_each_entry(p, &tcm->dp_resources, list) {
1976 tb_port_dbg(port, "DP %s resource available after hotplug\n",
1977 tb_port_is_dpin(port) ? "IN" : "OUT");
1978 list_add_tail(&port->list, &tcm->dp_resources);
1979 tb_exit_redrive(port);
1981 /* Look for suitable DP IN <-> DP OUT pairs now */
1985 static void tb_disconnect_and_release_dp(struct tb *tb)
1987 struct tb_cm *tcm = tb_priv(tb);
1988 struct tb_tunnel *tunnel, *n;
1991 * Tear down all DP tunnels and release their resources. They
1992 * will be re-established after resume based on plug events.
1994 list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1995 if (tb_tunnel_is_dp(tunnel))
1996 tb_deactivate_and_free_tunnel(tunnel);
1999 while (!list_empty(&tcm->dp_resources)) {
2000 struct tb_port *port;
2002 port = list_first_entry(&tcm->dp_resources,
2003 struct tb_port, list);
2004 list_del_init(&port->list);
2008 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
2010 struct tb_tunnel *tunnel;
2013 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2017 tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
2018 if (WARN_ON(!tunnel))
2021 tb_switch_xhci_disconnect(sw);
2023 tb_tunnel_deactivate(tunnel);
2024 list_del(&tunnel->list);
2025 tb_tunnel_free(tunnel);
2029 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
2031 struct tb_port *up, *down, *port;
2032 struct tb_cm *tcm = tb_priv(tb);
2033 struct tb_tunnel *tunnel;
2035 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
2040 * Look up an available down port. Since we are chaining, it should
2041 * be found right above this switch.
2043 port = tb_switch_downstream_port(sw);
2044 down = tb_find_pcie_down(tb_switch_parent(sw), port);
2048 tunnel = tb_tunnel_alloc_pci(tb, up, down);
2052 if (tb_tunnel_activate(tunnel)) {
2054 "PCIe tunnel activation failed, aborting\n");
2055 tb_tunnel_free(tunnel);
2060 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
2063 if (tb_switch_pcie_l1_enable(sw))
2064 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
2066 if (tb_switch_xhci_connect(sw))
2067 tb_sw_warn(sw, "failed to connect xHCI\n");
2069 list_add_tail(&tunnel->list, &tcm->tunnel_list);
2073 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2074 int transmit_path, int transmit_ring,
2075 int receive_path, int receive_ring)
2077 struct tb_cm *tcm = tb_priv(tb);
2078 struct tb_port *nhi_port, *dst_port;
2079 struct tb_tunnel *tunnel;
2080 struct tb_switch *sw;
2083 sw = tb_to_switch(xd->dev.parent);
2084 dst_port = tb_port_at(xd->route, sw);
2085 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2087 mutex_lock(&tb->lock);
2090 * When tunneling DMA paths the link should not enter CL states
2091 * so disable them now.
2095 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
2096 transmit_ring, receive_path, receive_ring);
2102 if (tb_tunnel_activate(tunnel)) {
2103 tb_port_info(nhi_port,
2104 "DMA tunnel activation failed, aborting\n");
2109 list_add_tail(&tunnel->list, &tcm->tunnel_list);
2110 mutex_unlock(&tb->lock);
2114 tb_tunnel_free(tunnel);
2117 mutex_unlock(&tb->lock);
2122 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2123 int transmit_path, int transmit_ring,
2124 int receive_path, int receive_ring)
2126 struct tb_cm *tcm = tb_priv(tb);
2127 struct tb_port *nhi_port, *dst_port;
2128 struct tb_tunnel *tunnel, *n;
2129 struct tb_switch *sw;
2131 sw = tb_to_switch(xd->dev.parent);
2132 dst_port = tb_port_at(xd->route, sw);
2133 nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
2135 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
2136 if (!tb_tunnel_is_dma(tunnel))
2138 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
2141 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
2142 receive_path, receive_ring))
2143 tb_deactivate_and_free_tunnel(tunnel);
2147 * Try to re-enable CL states now; it is OK if this fails
2148 * because we may still have another DMA tunnel active through
2149 * the same host router USB4 downstream port.
2154 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
2155 int transmit_path, int transmit_ring,
2156 int receive_path, int receive_ring)
2158 if (!xd->is_unplugged) {
2159 mutex_lock(&tb->lock);
2160 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
2161 transmit_ring, receive_path,
2163 mutex_unlock(&tb->lock);
2168 /* hotplug handling */
2171 * tb_handle_hotplug() - handle hotplug event
2173 * Executes on tb->wq.
2175 static void tb_handle_hotplug(struct work_struct *work)
2177 struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
2178 struct tb *tb = ev->tb;
2179 struct tb_cm *tcm = tb_priv(tb);
2180 struct tb_switch *sw;
2181 struct tb_port *port;
2183 /* Bring the domain back from sleep if it was suspended */
2184 pm_runtime_get_sync(&tb->dev);
2186 mutex_lock(&tb->lock);
2187 if (!tcm->hotplug_active)
2188 goto out; /* during init, suspend or shutdown */
2190 sw = tb_switch_find_by_route(tb, ev->route);
2193 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
2194 ev->route, ev->port, ev->unplug);
2197 if (ev->port > sw->config.max_port_number) {
2199 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
2200 ev->route, ev->port, ev->unplug);
2203 port = &sw->ports[ev->port];
2204 if (tb_is_upstream_port(port)) {
2205 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
2206 ev->route, ev->port, ev->unplug);
2210 pm_runtime_get_sync(&sw->dev);
2213 tb_retimer_remove_all(port);
2215 if (tb_port_has_remote(port)) {
2216 tb_port_dbg(port, "switch unplugged\n");
2217 tb_sw_set_unplugged(port->remote->sw);
2218 tb_free_invalid_tunnels(tb);
2219 tb_remove_dp_resources(port->remote->sw);
2220 tb_switch_tmu_disable(port->remote->sw);
2221 tb_switch_unconfigure_link(port->remote->sw);
2222 tb_switch_set_link_width(port->remote->sw,
2223 TB_LINK_WIDTH_SINGLE);
2224 tb_switch_remove(port->remote->sw);
2225 port->remote = NULL;
2226 if (port->dual_link_port)
2227 port->dual_link_port->remote = NULL;
2228 /* Maybe we can create another DP tunnel */
2229 tb_recalc_estimated_bandwidth(tb);
2231 } else if (port->xdomain) {
2232 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
2234 tb_port_dbg(port, "xdomain unplugged\n");
2236 * Service drivers are unbound during
2237 * tb_xdomain_remove() so setting XDomain as
2238 * unplugged here prevents deadlock if they call
2239 * tb_xdomain_disable_paths(). We will tear down
2240 * all the tunnels below.
2242 xd->is_unplugged = true;
2243 tb_xdomain_remove(xd);
2244 port->xdomain = NULL;
2245 __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
2247 tb_port_unconfigure_xdomain(port);
2248 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2249 tb_dp_resource_unavailable(tb, port);
2250 } else if (!port->port) {
2251 tb_sw_dbg(sw, "xHCI disconnect request\n");
2252 tb_switch_xhci_disconnect(sw);
2255 "got unplug event for disconnected port, ignoring\n");
2257 } else if (port->remote) {
2258 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
2259 } else if (!port->port && sw->authorized) {
2260 tb_sw_dbg(sw, "xHCI connect request\n");
2261 tb_switch_xhci_connect(sw);
2263 if (tb_port_is_null(port)) {
2264 tb_port_dbg(port, "hotplug: scanning\n");
2267 tb_port_dbg(port, "hotplug: no switch found\n");
2268 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
2269 tb_dp_resource_available(tb, port);
2273 pm_runtime_mark_last_busy(&sw->dev);
2274 pm_runtime_put_autosuspend(&sw->dev);
2279 mutex_unlock(&tb->lock);
2281 pm_runtime_mark_last_busy(&tb->dev);
2282 pm_runtime_put_autosuspend(&tb->dev);
2287 static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
2288 int *requested_down)
2290 int allocated_up, allocated_down, available_up, available_down, ret;
2291 int requested_up_corrected, requested_down_corrected, granularity;
2292 int max_up, max_down, max_up_rounded, max_down_rounded;
2293 struct tb *tb = tunnel->tb;
2294 struct tb_port *in, *out;
2296 ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
2300 in = tunnel->src_port;
2301 out = tunnel->dst_port;
2303 tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
2304 allocated_up, allocated_down);
2307 * If we get a rounded-up request from the graphics side, say HBR2 x 4
2308 * that is 17500 instead of 17280 (this is because of the
2309 * granularity), we allow it too. Here the graphics has already
2310 * negotiated with the DPRX the maximum possible rates (which is
2311 * 17280 in this case).
2313 * Since the link cannot go higher than 17280 we use that in our
2314 * calculations but the DP IN adapter Allocated BW write must be
2315 * the same value (17500) otherwise the adapter will mark it as
2316 * failed for graphics.
2318 ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
2322 ret = usb4_dp_port_granularity(in);
2327 max_up_rounded = roundup(max_up, granularity);
2328 max_down_rounded = roundup(max_down, granularity);
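/*
 * Worked example (illustrative, assuming 250 Mb/s granularity):
 * HBR2 x 4 is 17280 Mb/s, so the rounded maximum becomes
 * roundup(17280, 250) = 17500 Mb/s. A request of 17500 Mb/s is
 * then accepted and corrected back down to 17280 below for the
 * internal calculations, while 17500 is still the value written
 * to the DP IN adapter.
 */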
2331 * This will "fix" the request down to the maximum supported
2332 * rate * lanes if it is at the maximum rounded up level.
2334 requested_up_corrected = *requested_up;
2335 if (requested_up_corrected == max_up_rounded)
2336 requested_up_corrected = max_up;
2337 else if (requested_up_corrected < 0)
2338 requested_up_corrected = 0;
2339 requested_down_corrected = *requested_down;
2340 if (requested_down_corrected == max_down_rounded)
2341 requested_down_corrected = max_down;
2342 else if (requested_down_corrected < 0)
2343 requested_down_corrected = 0;
2345 tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
2346 requested_up_corrected, requested_down_corrected);
2348 if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
2349 (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
2350 tb_tunnel_dbg(tunnel,
2351 "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
2352 requested_up_corrected, requested_down_corrected,
2353 max_up_rounded, max_down_rounded);
2357 if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
2358 (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
2360 * If bandwidth on a link is < asym_threshold transition
2361 * the link to symmetric.
2363 tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
2365 * If the requested bandwidth is less than or equal to what is
2366 * currently allocated to that tunnel we simply change
2367 * the reservation of the tunnel. Since all the tunnels
2368 * going out from the same USB4 port are in the same
2369 * group the released bandwidth will be taken into
2370 * account for the other tunnels automatically below.
2372 return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
	/*
	 * More bandwidth is requested. Release all the potential
	 * bandwidth from USB3 first.
	 */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret)
		return ret;

	/*
	 * Then go over all tunnels that cross the same USB4 ports (they
	 * are also in the same group but we use the same function here
	 * that we use with the normal bandwidth allocation).
	 */
	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down, true);
	if (ret)
		goto reclaim;

	tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
		      available_up, available_down);

	if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
	    (*requested_down >= 0 && available_down >= requested_down_corrected)) {
		/*
		 * If bandwidth on a link is >= asym_threshold, transition
		 * the link to asymmetric.
		 */
		ret = tb_configure_asym(tb, in, out, *requested_up,
					*requested_down);
		if (ret) {
			tb_configure_sym(tb, in, out, 0, 0, true);
			return ret;
		}

		ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
						requested_down);
		if (ret) {
			tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
			tb_configure_sym(tb, in, out, 0, 0, true);
		}
	} else {
		ret = -ENOBUFS;
	}

reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return ret;
}
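/*
 * Editor's summary of the flow above with illustrative numbers: if a
 * tunnel currently holds 8100 Mb/s and the corrected request is 17280
 * Mb/s, the shrink path does not apply, so unused USB3 bandwidth is
 * released first, the bandwidth available to the group is re-evaluated,
 * and the reservation is raised only if the group can cover the
 * request; otherwise -ENOBUFS is returned. Either way the USB3
 * bandwidth is reclaimed through the reclaim label.
 */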
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	int requested_bw, requested_up, requested_down, ret;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;

	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto unlock;

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
			ev->route);
		goto unlock;
	}

	in = &sw->ports[ev->port];
	if (!tb_port_is_dpin(in)) {
		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
		goto put_sw;
	}

	tb_port_dbg(in, "handling bandwidth allocation request\n");
	if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
		goto put_sw;
	}

	ret = usb4_dp_port_requested_bandwidth(in);
	if (ret < 0) {
		if (ret == -ENODATA)
			tb_port_dbg(in, "no bandwidth request active\n");
		else
			tb_port_warn(in, "failed to read requested bandwidth\n");
		goto put_sw;
	}
	requested_bw = ret;

	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
	if (!tunnel) {
		tb_port_warn(in, "failed to find tunnel\n");
		goto put_sw;
	}

	out = tunnel->dst_port;
	if (tb_port_path_direction_downstream(in, out)) {
		requested_up = -1;
		requested_down = requested_bw;
	} else {
		requested_up = requested_bw;
		requested_down = -1;
	}
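	/*
	 * Illustrative mapping (editor's note): for the common case of a
	 * DP IN adapter routing to a DP OUT downstream of it, the path
	 * direction is downstream, so e.g. a 17280 Mb/s request becomes
	 * requested_down = 17280 and requested_up = -1, where -1 marks
	 * the unused direction for tb_alloc_dp_bandwidth().
	 */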
	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
	if (ret) {
		if (ret == -ENOBUFS)
			tb_tunnel_warn(tunnel,
				       "not enough bandwidth available\n");
		else
			tb_tunnel_warn(tunnel,
				       "failed to change bandwidth allocation\n");
	} else {
		tb_tunnel_dbg(tunnel,
			      "bandwidth allocation changed to %d/%d Mb/s\n",
			      requested_up, requested_down);

		/* Update other clients about the allocation change */
		tb_recalc_estimated_bandwidth(tb);
	}

put_sw:
	tb_switch_put(sw);
unlock:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
	queue_work(tb->wq, &ev->work);
}
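/*
 * Editor's note on the pattern above: the control channel callback
 * cannot do the heavy lifting itself (the handler takes tb->lock and
 * may sleep in runtime PM), so the request is captured in a
 * heap-allocated tb_hotplug_event and deferred to tb->wq:
 *
 *	tb_queue_dp_bandwidth_request()    kmalloc(ev) + queue_work()
 *	  tb_handle_dp_bandwidth_request() takes tb->lock, does the work
 *	    kfree(ev)                      the handler owns and frees ev
 */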
static void tb_handle_notification(struct tb *tb, u64 route,
				   const struct cfg_error_pkg *error)
{
	switch (error->error) {
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		break;

	case TB_CFG_ERROR_DP_BW:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		tb_queue_dp_bandwidth_request(tb, route, error->port);
		break;

	default:
		/* Ignore for now */
		break;
	}
}
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates hotplug events to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route = tb_cfg_get_route(&pkg->header);

	switch (type) {
	case TB_CFG_PKG_ERROR:
		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
		return;
	case TB_CFG_PKG_EVENT:
		break;
	default:
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support highest CLx state, we set host router's TMU to
	 * Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/* Add DP resources from the DP tunnels created by the boot firmware */
	tb_discover_dp_resources(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels, create
	 * them now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to the userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");

	return 0;
}
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to re-enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_configuration_valid(sw);

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_set_link_width(port->remote->sw,
						 port->remote->sw->link_width);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * the pcie links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}
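/*
 * Editor's elaboration of the deadlock mentioned above (an assumption
 * based on that comment): removing an unplugged child synchronously in
 * this path could runtime-resume that same device, which would then
 * wait on the runtime resume already in progress here. Deferring the
 * removal to tb->wq after a short delay breaks that cycle.
 */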
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static bool tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;
	bool ret;

	if (!x86_apple_machine)
		return false;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return false;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return false;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return false;

	/*
	 * For each hotplug downstream port, create a device link back
	 * to the NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	ret = false;
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
			ret = true;
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}

	return ret;
}
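/*
 * Editor's sketch of what the device links above achieve, with the
 * consumer/supplier roles as passed to device_link_add():
 *
 *	tunneled PCIe downstream port (consumer)
 *	    --- DL_FLAG_PM_RUNTIME --->  NHI (supplier)
 *
 * On resume the device core powers up the supplier (the NHI) first, so
 * the PCIe tunnels are re-established before the ports behind them are
 * resumed. DL_FLAG_AUTOREMOVE_SUPPLIER drops the link automatically
 * when the NHI driver is unbound.
 */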
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
	tb_init_bandwidth_groups(tcm);

	tb_dbg(tb, "using software connection manager\n");

	/*
	 * Device links are needed to make sure we establish tunnels
	 * before the PCIe/USB stack is resumed so complain here if we
	 * found them missing.
	 */
	if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
		tb_warn(tb, "device links to tunneled native ports are missing!\n");

	return tb;
}