// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/string_helpers.h>

#include "tunnel.h"
#include "tb.h"
/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

#define TB_PCI_PRIORITY			3
#define TB_PCI_WEIGHT			1
/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

#define TB_USB3_PRIORITY		3
#define TB_USB3_WEIGHT			2
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DP_VIDEO_PRIORITY		1
#define TB_DP_VIDEO_WEIGHT		1

#define TB_DP_AUX_PRIORITY		2
#define TB_DP_AUX_WEIGHT		1
/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1

#define TB_DMA_PRIORITY			5
#define TB_DMA_WEIGHT			1
/*
 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
 * according to the USB4 v2 Connection Manager guide. This ends up
 * reserving 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking
 * weights into account.
 */
#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
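/*
 * With TB_PCI_WEIGHT = 1 and TB_USB3_WEIGHT = 2 these evaluate to
 * 1500 Mb/s and 3000 Mb/s respectively, matching the numbers quoted
 * in the comment above.
 */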
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
		__MODULE_STRING(TB_DMA_CREDITS) ")");
static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
MODULE_PARM_DESC(bw_alloc_mode,
		 "enable bandwidth allocation mode if supported (default: true)");

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
	return port->total_credits - port->ctl_credits;
}
/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *		    streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
					 size_t *max_dp_streams)
{
	const struct tb_switch *sw = port->sw;
	int credits, usb3, pcie, spare;
	size_t ndp;

	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

	if (tb_acpi_is_xdomain_allowed()) {
		spare = min_not_zero(sw->max_dma_credits, dma_credits);
		/* Add some credits for potential second DMA tunnel */
		spare += TB_MIN_DMA_CREDITS;
	} else {
		spare = 0;
	}

	credits = tb_usable_credits(port);
	if (tb_acpi_may_tunnel_dp()) {
		/*
		 * Maximum number of DP streams possible through the
		 * lane adapter.
		 */
		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
			ndp = (credits - (usb3 + pcie + spare)) /
			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
		else
			ndp = 0;
	} else {
		ndp = 0;
	}
	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}
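/*
 * Illustration of the accounting above (hypothetical values, not taken
 * from any real router): with 60 usable credits, usb3 = 14, pcie = 6
 * and spare = 14 + 1 = 15, a DP stream needing 1 + 18 = 19 credits
 * gives ndp = (60 - 35) / 19 = 1, leaving 60 - 19 - 14 = 27 credits
 * for PCIe and DMA tunnels.
 */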
static void tb_init_pm_support(struct tb_path_hop *hop)
{
	struct tb_port *out_port = hop->out_port;
	struct tb_port *in_port = hop->in_port;

	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
	    usb4_switch_version(in_port->sw) >= 2)
		hop->pm_support = true;
}
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int ret;

	/* Only supported if both routers are at least USB4 v2 */
	if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
	    (usb4_switch_version(tunnel->dst_port->sw) < 2))
		return 0;

	if (enable && tb_port_get_link_generation(port) < 4)
		return 0;

	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
	if (ret)
		return ret;

	/*
	 * Downstream router could be unplugged so disable of encapsulation
	 * in upstream router is still possible.
	 */
	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
	if (ret) {
		if (enable)
			return ret;
		if (ret != -ENODEV)
			return ret;
	}

	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
		      str_enabled_disabled(enable));
	return 0;
}
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	if (activate) {
		res = tb_pci_set_ext_encapsulation(tunnel, activate);
		if (res)
			return res;
	}

	if (activate)
		res = tb_pci_port_enable(tunnel->dst_port, activate);
	else
		res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (activate) {
		res = tb_pci_port_enable(tunnel->src_port, activate);
		if (res)
			return res;
	} else {
		/* Downstream router could be unplugged */
		tb_pci_port_enable(tunnel->dst_port, activate);
	}

	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
}
static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}
static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_PCI_PRIORITY;
	path->weight = TB_PCI_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}
/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
/**
 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
 * @port: Lane 0 adapter
 * @reserved_up: Upstream bandwidth in Mb/s to reserve
 * @reserved_down: Downstream bandwidth in Mb/s to reserve
 *
 * Can be called for any connected lane 0 adapter to find out how much
 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
 * Returns true if there is something to be reserved and writes the
 * amount to @reserved_down/@reserved_up. Otherwise returns false and
 * does not touch the parameters.
 */
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down)
{
	if (WARN_ON_ONCE(!port->remote))
		return false;

	if (!tb_acpi_may_tunnel_pcie())
		return false;

	if (tb_port_get_link_generation(port) < 4)
		return false;

	/* Must have PCIe adapters */
	if (tb_is_upstream_port(port)) {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
			return false;
	} else {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
			return false;
	}

	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;

	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
		    *reserved_down);
	return true;
}
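/*
 * For example, on a Gen 4 (USB4 v2) link with PCIe adapters on both
 * sides of the link this reserves USB4_V2_PCI_MIN_BANDWIDTH = 1500 Mb/s
 * in each direction.
 */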
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
			      int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
/*
 * Returns maximum possible rate from capability supporting only DP 2.0
 * and below. Used when DP BW allocation mode is not enabled.
 */
static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}
/*
 * Returns maximum possible rate from capability supporting DP 2.1
 * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
 * mode is enabled.
 */
static inline u32 tb_dp_cap_get_rate_ext(u32 val)
{
	if (val & DP_COMMON_CAP_UHBR20)
		return 20000;
	else if (val & DP_COMMON_CAP_UHBR13_5)
		return 13500;
	else if (val & DP_COMMON_CAP_UHBR10)
		return 10000;

	return tb_dp_cap_get_rate(val);
}
static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
{
	return rate >= 10000;
}
static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}
static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}
static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b or 128b/132b encoding overhead */
	if (tb_dp_is_uhbr_rate(rate))
		return rate * lanes * 128 / 132;
	return rate * lanes * 8 / 10;
}
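/*
 * For example HBR2 x4 yields 5400 * 4 * 8 / 10 = 17280 Mb/s (see the
 * dp_bw[] table below), while UHBR10 x4 yields
 * 10000 * 4 * 128 / 132 = 38787 Mb/s.
 */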
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
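/*
 * For example, if both adapters support HBR3 x4 but only 9000 Mb/s is
 * available, the walk above skips 8100 x4, 5400 x4 and 8100 x2 and
 * settles on 2700 Mb/s x4 = 8640 Mb/s, the first entry that fits.
 */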
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out, 3000);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      out_rate, out_lanes, bw);

	if (tb_port_path_direction_downstream(in, out))
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_tunnel_info(tunnel, "not enough bandwidth\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_tunnel_dbg(tunnel,
			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			      new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
{
	int ret, estimated_bw, granularity, tmp;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	u32 out_dp_cap, out_rate, out_lanes;
	u32 in_dp_cap, in_rate, in_lanes;
	u32 rate, lanes;

	if (!bw_alloc_mode)
		return 0;

	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, in->group->index);
	if (ret)
		return ret;

	/*
	 * Get the non-reduced rate and lanes based on the lowest
	 * capability of both adapters.
	 */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);

	rate = min(in_rate, out_rate);
	lanes = min(in_lanes, out_lanes);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tmp);

	ret = usb4_dp_port_set_nrd(in, rate, lanes);
	if (ret)
		return ret;

	/*
	 * Pick up granularity that supports maximum possible bandwidth.
	 * For that we use the UHBR rates too.
	 */
	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
	rate = min(in_rate, out_rate);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel,
		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tmp);

	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
	     granularity *= 2)
		;

	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);

	/*
	 * Returns -EINVAL if granularity above is outside of the
	 * accepted ranges.
	 */
	ret = usb4_dp_port_set_granularity(in, granularity);
	if (ret)
		return ret;

	/*
	 * Bandwidth estimation is pretty much what we have in
	 * max_up/down fields. For discovery we just read what the
	 * estimation was set to.
	 */
	if (tb_port_path_direction_downstream(in, out))
		estimated_bw = tunnel->max_down;
	else
		estimated_bw = tunnel->max_up;

	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);

	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
	if (ret)
		return ret;

	/* Initial allocation should be 0 according to the spec */
	ret = usb4_dp_port_allocate_bandwidth(in, 0);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
	return 0;
}
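/*
 * Granularity example: a UHBR20 x4 capable pair maxes out at
 * 20000 * 4 * 128 / 132 = 77575 Mb/s. Since 77575 / 250 > 255 the loop
 * above doubles the granularity once and settles on 500 Mb/s.
 */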
static int tb_dp_init(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	struct tb_switch *sw = in->sw;
	struct tb *tb = in->sw->tb;
	int ret;

	ret = tb_dp_xchg_caps(tunnel);
	if (ret)
		return ret;

	if (!tb_switch_is_usb4(sw))
		return 0;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return 0;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");

	ret = usb4_dp_port_set_cm_id(in, tb->index);
	if (ret)
		return ret;

	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return;
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
	}
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}
/* max_bw is rounded up to next granularity */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
						  int *max_bw)
{
	struct tb_port *in = tunnel->src_port;
	int ret, rate, lanes, nrd_bw;
	u32 cap;

	/*
	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
	 * read parameter values, so we can use it to determine the
	 * maximum possible bandwidth over this link.
	 *
	 * See USB4 v2 spec 1.0 10.4.4.5.
	 */
	ret = tb_port_read(in, &cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	rate = tb_dp_cap_get_rate_ext(cap);
	if (tb_dp_is_uhbr_rate(rate)) {
		/*
		 * When UHBR is used there is no reduction in lanes so
		 * we can use this directly.
		 */
		lanes = tb_dp_cap_get_lanes(cap);
	} else {
		/*
		 * If there is no UHBR supported then check the
		 * non-reduced rate and lanes.
		 */
		ret = usb4_dp_port_nrd(in, &rate, &lanes);
		if (ret)
			return ret;
	}

	nrd_bw = tb_dp_bandwidth(rate, lanes);

	if (max_bw) {
		ret = usb4_dp_port_granularity(in);
		if (ret < 0)
			return ret;
		*max_bw = roundup(nrd_bw, ret);
	}

	return nrd_bw;
}
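/*
 * For example, with a non-reduced HBR2 x4 link (17280 Mb/s) and
 * 250 Mb/s granularity the function above returns 17280 and stores
 * roundup(17280, 250) = 17500 Mb/s in *max_bw.
 */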
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
						   int *consumed_up,
						   int *consumed_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, allocated_bw, max_bw;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	if (!tunnel->bw_mode)
		return -EOPNOTSUPP;

	/* Read what was allocated previously if any */
	ret = usb4_dp_port_allocated_bandwidth(in);
	if (ret < 0)
		return ret;
	allocated_bw = ret;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;
	if (allocated_bw == max_bw)
		allocated_bw = ret;

	if (tb_port_path_direction_downstream(in, out)) {
		*consumed_up = 0;
		*consumed_down = allocated_bw;
	} else {
		*consumed_up = allocated_bw;
		*consumed_down = 0;
	}

	return 0;
}
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				     int *allocated_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;

	/*
	 * If we have already set the allocated bandwidth then use that.
	 * Otherwise we read it from the DPRX.
	 */
	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
		int ret, allocated_bw, max_bw;

		ret = usb4_dp_port_allocated_bandwidth(in);
		if (ret < 0)
			return ret;
		allocated_bw = ret;

		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
		if (ret < 0)
			return ret;
		if (allocated_bw == max_bw)
			allocated_bw = ret;

		if (tb_port_path_direction_downstream(in, out)) {
			*allocated_up = 0;
			*allocated_down = allocated_bw;
		} else {
			*allocated_up = allocated_bw;
			*allocated_down = 0;
		}
		return 0;
	}

	return tunnel->consumed_bandwidth(tunnel, allocated_up,
					  allocated_down);
}
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
				 int *alloc_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int max_bw, ret, tmp;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;

	if (tb_port_path_direction_downstream(in, out)) {
		tmp = min(*alloc_down, max_bw);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = tmp;
		*alloc_up = 0;
	} else {
		tmp = min(*alloc_up, max_bw);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = 0;
		*alloc_up = tmp;
	}

	/* Now we can use BW mode registers to figure out the bandwidth */
	/* TODO: need to handle discovery too */
	tunnel->bw_mode = true;
	return 0;
}
static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	struct tb_port *in = tunnel->src_port;

	/*
	 * Wait for DPRX done. Normally it should be already set for
	 * active tunnel.
	 */
	do {
		u32 val;
		int ret;

		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_COMMON_CAP, 1);
		if (ret)
			return ret;

		if (val & DP_COMMON_CAP_DPRX_DONE) {
			tb_tunnel_dbg(tunnel, "DPRX read done\n");
			return 0;
		}
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
	return -ETIMEDOUT;
}
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
			  u32 *lanes)
{
	struct tb_port *in = tunnel->src_port;
	u32 val;
	int ret;

	switch (cap) {
	case DP_LOCAL_CAP:
	case DP_REMOTE_CAP:
	case DP_COMMON_CAP:
		break;

	default:
		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
		return -EINVAL;
	}

	/*
	 * Read from the copied remote cap so that we take into account
	 * if capabilities were reduced during exchange.
	 */
	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
	if (ret)
		return ret;

	*rate = tb_dp_cap_get_rate(val);
	*lanes = tb_dp_cap_get_lanes(val);
	return 0;
}
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				   int *max_down)
{
	struct tb_port *in = tunnel->src_port;
	int ret;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
	if (ret < 0)
		return ret;

	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
		*max_up = 0;
		*max_down = ret;
	} else {
		*max_up = ret;
		*max_down = 0;
	}

	return 0;
}
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		/*
		 * On USB4 routers check if the bandwidth allocation
		 * mode is enabled first and then read the bandwidth
		 * through those registers.
		 */
		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
							      consumed_down);
		if (ret < 0) {
			if (ret != -EOPNOTSUPP)
				return ret;
		} else if (!ret) {
			return 0;
		}
		/*
		 * Then see if the DPRX negotiation is ready and if yes
		 * return that bandwidth (it may be smaller than the
		 * reduced one). According to VESA spec, the DPRX
		 * negotiation shall complete in 5 seconds after tunnel
		 * is established. We give it 100ms extra just in case.
		 */
		ret = tb_dp_wait_dprx(tunnel, 5100);
		if (ret)
			return ret;
		ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
		if (ret)
			return ret;
	} else if (sw->generation >= 2) {
		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
		if (ret)
			return ret;
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}
static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port))
		hop->initial_credits = sw->min_dp_aux_credits;
	else
		hop->initial_credits = 1;
}
static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DP_AUX_PRIORITY;
	path->weight = TB_DP_AUX_WEIGHT;

	tb_path_for_each_hop(path, hop) {
		tb_dp_init_aux_credits(hop);
		if (pm_support)
			tb_init_pm_support(hop);
	}
}
static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int nfc_credits;
		size_t max_dp_streams;

		tb_available_credits(port, &max_dp_streams);
		/*
		 * Read the number of currently allocated NFC credits
		 * from the lane adapter. Since we only use them for DP
		 * tunneling we can use that to figure out how many DP
		 * tunnels already go through the lane adapter.
		 */
		nfc_credits = port->config.nfc_credits &
				ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}
static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DP_VIDEO_PRIORITY;
	path->weight = TB_DP_VIDEO_WEIGHT;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dp_init_video_credits(hop);
		if (ret)
			return ret;
		if (pm_support)
			tb_init_pm_support(hop);
	}

	return 0;
}
static void tb_dp_dump(struct tb_tunnel *tunnel)
{
	struct tb_port *in, *out;
	u32 dp_cap, rate, lanes;

	in = tunnel->src_port;
	out = tunnel->dst_port;

	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
			 in->cap_adap + DP_LOCAL_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));

	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
			 out->cap_adap + DP_LOCAL_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));

	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
			 in->cap_adap + DP_REMOTE_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));
}
/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_dp_dump(tunnel);

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}
/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @link_nr: Preferred lane adapter when the link is not bonded
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;
	bool pm_support;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;
	pm_support = usb4_switch_version(in->sw) >= 2;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     link_nr, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, pm_support);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path, pm_support);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path, pm_support);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
	const struct tb_switch *sw = port->sw;
	int credits;

	credits = tb_available_credits(port, NULL);
	if (tb_acpi_may_tunnel_pcie())
		credits -= sw->max_pcie_credits;
	credits -= port->dma_credits;

	return credits > 0 ? credits : 0;
}
static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

		while (credits > available)
			credits--;

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}
/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DMA_PRIORITY;
	path->weight = TB_DMA_WEIGHT;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so can use all
	 * the credits (except the ones reserved for control traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	hop->in_port->dma_credits += tmp;

	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}
/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_ALL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DMA_PRIORITY;
	path->weight = TB_DMA_WEIGHT;
	path->clear_fc = true;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dma_reserve_credits(hop, credits);
		if (ret)
			return ret;
	}

	return 0;
}
static void tb_dma_release_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		port->dma_credits -= hop->initial_credits;

		tb_port_dbg(port, "released %u DMA path credits\n",
			    hop->initial_credits);
	}
}
static void tb_dma_deinit_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_dma_release_credits(hop);
}
static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		tb_dma_deinit_path(tunnel->paths[i]);
	}
}
/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	int credits;

	/* Ring 0 is reserved for control channel */
	if (WARN_ON(!receive_ring || !transmit_ring))
		return NULL;

	if (receive_ring > 0)
		npaths++;
	if (transmit_ring > 0)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
	tunnel->deinit = tb_dma_deinit;

	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);

	if (receive_ring > 0) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_rx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
			goto err_free;
		}
	}

	if (transmit_ring > 0) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_tx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
			goto err_free;
		}
	}

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel, if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}
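/*
 * For example, a caller that only knows the transmit ring can match
 * with (illustrative only):
 *
 *	if (tb_tunnel_match_dma(tunnel, -1, transmit_ring, -1, -1))
 *		...
 */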
static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}
static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}
static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}
static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take it into account here.
	 */
	*consumed_up = tunnel->allocated_up *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
	*consumed_down = tunnel->allocated_down *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;

	if (tb_port_get_link_generation(port) >= 4) {
		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
	}

	return 0;
}
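/*
 * For example, with 3000 Mb/s allocated in the downstream direction
 * and PCIe tunneling enabled the formula above gives
 * 3000 * (2 + 1) / 2 = 4500 Mb/s consumed downstream.
 */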
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}
static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
		return;
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}
static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		credits = sw->max_usb3_credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
}
static void tb_usb3_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_USB3_PRIORITY;
	path->weight = TB_USB3_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop)
		tb_usb3_init_credits(hop);
}
/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}
/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}
/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	if (tunnel->deinit)
		tunnel->deinit(tunnel);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}
/**
 * tb_tunnel_is_invalid - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}
/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}
/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}
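/*
 * A typical connection manager sequence looks roughly like this
 * (illustrative sketch only; tunnel_list stands for whatever list
 * the caller keeps, error handling shortened):
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *	list_add_tail(&tunnel->list, &tunnel_list);
 */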
/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}
/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}
static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}
/**
 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
 * @tunnel: Tunnel to check
 * @max_up: Maximum upstream bandwidth in Mb/s
 * @max_down: Maximum downstream bandwidth in Mb/s
 *
 * Returns maximum possible bandwidth this tunnel can use if not limited
 * by other bandwidth clients. If the tunnel does not support this
 * returns %-EOPNOTSUPP.
 */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->maximum_bandwidth)
		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
	return -EOPNOTSUPP;
}
/**
 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
 * @tunnel: Tunnel to check
 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
 *		    stored here
 *
 * Returns the bandwidth allocated for the tunnel. This may be higher
 * than what the tunnel actually consumes.
 */
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->allocated_bandwidth)
		return tunnel->allocated_bandwidth(tunnel, allocated_up,
						   allocated_down);
	return -EOPNOTSUPP;
}
/**
 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
 * @tunnel: Tunnel whose bandwidth allocation to change
 * @alloc_up: New upstream bandwidth in Mb/s
 * @alloc_down: New downstream bandwidth in Mb/s
 *
 * Tries to change tunnel bandwidth allocation. If it succeeds, returns
 * %0 and updates @alloc_up and @alloc_down to what was actually
 * allocated (it may not be the same as passed originally). Returns
 * negative errno in case of failure.
 */
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->alloc_bandwidth)
		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);

	return -EOPNOTSUPP;
}
/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}
/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
{
	return tb_tunnel_names[tunnel->type];
}