// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/string_helpers.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID of 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

#define TB_PCI_PRIORITY			3
#define TB_PCI_WEIGHT			1

/* USB3 adapters always use HopID of 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

#define TB_USB3_PRIORITY		3
#define TB_USB3_WEIGHT			2

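/*
 * Note: the PCIe and USB3 paths share the same priority level, so the
 * link arbitrates between them by weight; with weights 1 and 2 USB3
 * gets roughly twice the share of the bulk bandwidth compared to PCIe.
 */
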
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DP_VIDEO_PRIORITY		1
#define TB_DP_VIDEO_WEIGHT		1

#define TB_DP_AUX_PRIORITY		2
#define TB_DP_AUX_WEIGHT		1

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1

#define TB_DMA_PRIORITY			5
#define TB_DMA_WEIGHT			1

/*
 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
 * according to USB4 v2 Connection Manager guide. This ends up reserving
 * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
 * account.
 */
#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)

static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
		__MODULE_STRING(TB_DMA_CREDITS) ")");

static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
MODULE_PARM_DESC(bw_alloc_mode,
		 "enable bandwidth allocation mode if supported (default: true)");

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
	return port->total_credits - port->ctl_credits;
}

/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *		    streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
					 size_t *max_dp_streams)
{
	const struct tb_switch *sw = port->sw;
	int credits, usb3, pcie, spare;
	size_t ndp;

	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

	if (tb_acpi_is_xdomain_allowed()) {
		spare = min_not_zero(sw->max_dma_credits, dma_credits);
		/* Add some credits for potential second DMA tunnel */
		spare += TB_MIN_DMA_CREDITS;
	} else {
		spare = 0;
	}

	credits = tb_usable_credits(port);
	if (tb_acpi_may_tunnel_dp()) {
		/*
		 * Maximum number of DP streams possible through the
		 * lane adapter.
		 */
		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
			ndp = (credits - (usb3 + pcie + spare)) /
			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
		else
			ndp = 0;
	} else {
		ndp = 0;
	}
	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}

static void tb_init_pm_support(struct tb_path_hop *hop)
{
	struct tb_port *out_port = hop->out_port;
	struct tb_port *in_port = hop->in_port;

	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
	    usb4_switch_version(in_port->sw) >= 2)
		hop->pm_support = true;
}

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int ret;

	/* Only supported if both routers are at least USB4 v2 */
	if (tb_port_get_link_generation(port) < 4)
		return 0;

	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
	if (ret)
		return ret;

	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
		      str_enabled_disabled(enable));
	return 0;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	if (activate) {
		res = tb_pci_set_ext_encapsulation(tunnel, activate);
		if (res)
			return res;
	}

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port)) {
		res = tb_pci_port_enable(tunnel->dst_port, activate);
		if (res)
			return res;
	}

	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_PCI_PRIORITY;
	path->weight = TB_PCI_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

/**
 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
 * @port: Lane 0 adapter
 * @reserved_up: Upstream bandwidth in Mb/s to reserve
 * @reserved_down: Downstream bandwidth in Mb/s to reserve
 *
 * Can be called for any connected lane 0 adapter to find out how much
 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
 * Returns true if there is something to be reserved and writes the
 * amount to @reserved_down/@reserved_up. Otherwise returns false and
 * does not touch the parameters.
 */
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down)
{
	if (WARN_ON_ONCE(!port->remote))
		return false;

	if (!tb_acpi_may_tunnel_pcie())
		return false;

	if (tb_port_get_link_generation(port) < 4)
		return false;

	/* Must have PCIe adapters */
	if (tb_is_upstream_port(port)) {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
			return false;
	} else {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
			return false;
	}

	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;

	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
		    *reserved_down);
	return true;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
			      int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * Returns maximum possible rate from capability supporting only DP 2.0
 * and below. Used when DP BW allocation mode is not enabled.
 */
static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

/*
 * Returns maximum possible rate from capability supporting DP 2.1
 * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
 * mode is enabled.
 */
static inline u32 tb_dp_cap_get_rate_ext(u32 val)
{
	if (val & DP_COMMON_CAP_UHBR20)
		return 20000;
	else if (val & DP_COMMON_CAP_UHBR13_5)
		return 13500;
	else if (val & DP_COMMON_CAP_UHBR10)
		return 10000;

	return tb_dp_cap_get_rate(val);
}

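/* DP 2.x UHBR rates start at UHBR10 (10000 Mb/s per lane) */
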
static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
{
	return rate >= 10000;
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

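/*
 * Examples of the encoding overhead below: HBR2 x4 gives
 * 5400 * 4 * 8 / 10 = 17280 Mb/s, whereas UHBR10 x4 gives
 * 10000 * 4 * 128 / 132 = 38787 Mb/s.
 */
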
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b 128/132b encoding */
	if (tb_dp_is_uhbr_rate(rate))
		return rate * lanes * 128 / 132;
	return rate * lanes * 8 / 10;
}

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

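/*
 * For example, if both adapters support HBR3 x4 but only 9000 Mb/s is
 * available, the table above makes tb_dp_reduce_bandwidth() select
 * HBR x4 (2700 Mb/s x4 = 8640 Mb/s), the largest fitting combination.
 */
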
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out, 3000);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      out_rate, out_lanes, bw);

	if (tb_port_path_direction_downstream(in, out))
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_tunnel_info(tunnel, "not enough bandwidth\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_tunnel_dbg(tunnel,
			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			      new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
{
	int ret, estimated_bw, granularity, tmp;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	u32 out_dp_cap, out_rate, out_lanes;
	u32 in_dp_cap, in_rate, in_lanes;
	u32 rate, lanes;

	if (!bw_alloc_mode)
		return 0;

	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, in->group->index);
	if (ret)
		return ret;

	/*
	 * Get the non-reduced rate and lanes based on the lowest
	 * capability of both adapters.
	 */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);

	rate = min(in_rate, out_rate);
	lanes = min(in_lanes, out_lanes);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %d Mb/s\n",
		      rate, lanes, tmp);

	ret = usb4_dp_port_set_nrd(in, rate, lanes);
	if (ret)
		return ret;

	/*
	 * Pick up granularity that supports maximum possible bandwidth.
	 * For that we use the UHBR rates too.
	 */
	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
	rate = min(in_rate, out_rate);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel,
		      "maximum bandwidth through allocation mode %u Mb/s x%u = %d Mb/s\n",
		      rate, lanes, tmp);

	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
	     granularity *= 2)
		;

	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);

	/*
	 * Returns -EINVAL if granularity above is outside of the
	 * accepted ranges.
	 */
	ret = usb4_dp_port_set_granularity(in, granularity);
	if (ret)
		return ret;

	/*
	 * Bandwidth estimation is pretty much what we have in
	 * max_up/down fields. For discovery we just read what the
	 * estimation was set to.
	 */
	if (tb_port_path_direction_downstream(in, out))
		estimated_bw = tunnel->max_down;
	else
		estimated_bw = tunnel->max_up;

	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);

	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
	if (ret)
		return ret;

	/* Initial allocation should be 0 according to the spec */
	ret = usb4_dp_port_allocate_bandwidth(in, 0);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
	return 0;
}

static int tb_dp_init(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	struct tb_switch *sw = in->sw;
	struct tb *tb = in->sw->tb;
	int ret;

	ret = tb_dp_xchg_caps(tunnel);
	if (ret)
		return ret;

	if (!tb_switch_is_usb4(sw))
		return 0;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return 0;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");

	ret = usb4_dp_port_set_cm_id(in, tb->index);
	if (ret)
		return ret;

	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}

static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return;
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
	}
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

/* max_bw is rounded up to next granularity */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
						  int *max_bw)
{
	struct tb_port *in = tunnel->src_port;
	int ret, rate, lanes, nrd_bw;
	u32 cap;

	/*
	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
	 * read parameter values, so we can use this to determine the
	 * maximum possible bandwidth over this link.
	 *
	 * See USB4 v2 spec 1.0 10.4.4.5.
	 */
	ret = tb_port_read(in, &cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	rate = tb_dp_cap_get_rate_ext(cap);
	if (tb_dp_is_uhbr_rate(rate)) {
		/*
		 * When UHBR is used there is no reduction in lanes so
		 * we can use this directly.
		 */
		lanes = tb_dp_cap_get_lanes(cap);
	} else {
		/*
		 * If there is no UHBR supported then check the
		 * non-reduced rate and lanes.
		 */
		ret = usb4_dp_port_nrd(in, &rate, &lanes);
		if (ret)
			return ret;
	}

	nrd_bw = tb_dp_bandwidth(rate, lanes);

	if (max_bw) {
		ret = usb4_dp_port_granularity(in);
		if (ret < 0)
			return ret;
		*max_bw = roundup(nrd_bw, ret);
	}

	return nrd_bw;
}

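/*
 * For example, a non-reduced rate of HBR2 x2 is 8640 Mb/s after the
 * 8b/10b overhead; with the minimum granularity of 250 Mb/s the
 * maximum above rounds up to 8750 Mb/s.
 */
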
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
						   int *consumed_up,
						   int *consumed_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, allocated_bw, max_bw;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	if (!tunnel->bw_mode)
		return -EOPNOTSUPP;

	/* Read what was allocated previously if any */
	ret = usb4_dp_port_allocated_bandwidth(in);
	if (ret < 0)
		return ret;
	allocated_bw = ret;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;
	if (allocated_bw == max_bw)
		allocated_bw = ret;

	if (tb_port_path_direction_downstream(in, out)) {
		*consumed_up = 0;
		*consumed_down = allocated_bw;
	} else {
		*consumed_up = allocated_bw;
		*consumed_down = 0;
	}

	return 0;
}

static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				     int *allocated_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;

	/*
	 * If we have already set the allocated bandwidth then use that.
	 * Otherwise we read it from the DPRX.
	 */
	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
		int ret, allocated_bw, max_bw;

		ret = usb4_dp_port_allocated_bandwidth(in);
		if (ret < 0)
			return ret;
		allocated_bw = ret;

		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
		if (ret < 0)
			return ret;
		if (allocated_bw == max_bw)
			allocated_bw = ret;

		if (tb_port_path_direction_downstream(in, out)) {
			*allocated_up = 0;
			*allocated_down = allocated_bw;
		} else {
			*allocated_up = allocated_bw;
			*allocated_down = 0;
		}
		return 0;
	}

	return tunnel->consumed_bandwidth(tunnel, allocated_up,
					  allocated_down);
}

static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
				 int *alloc_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int max_bw, ret, tmp;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;

	if (tb_port_path_direction_downstream(in, out)) {
		tmp = min(*alloc_down, max_bw);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = tmp;
		*alloc_up = 0;
	} else {
		tmp = min(*alloc_up, max_bw);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = 0;
		*alloc_up = tmp;
	}

	/* Now we can use BW mode registers to figure out the bandwidth */
	/* TODO: need to handle discovery too */
	tunnel->bw_mode = true;
	return 0;
}

static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
			   int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	struct tb_port *in = tunnel->src_port;

	/*
	 * Wait for DPRX done. Normally it should be already set for
	 * an active tunnel.
	 */
	do {
		u32 val;
		int ret;

		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_COMMON_CAP, 1);
		if (ret)
			return ret;

		if (val & DP_COMMON_CAP_DPRX_DONE) {
			*rate = tb_dp_cap_get_rate(val);
			*lanes = tb_dp_cap_get_lanes(val);

			tb_tunnel_dbg(tunnel, "DPRX read done\n");
			return 0;
		}
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
			  u32 *lanes)
{
	struct tb_port *in = tunnel->src_port;
	u32 val;
	int ret;

	switch (cap) {
	case DP_LOCAL_CAP:
	case DP_REMOTE_CAP:
	case DP_COMMON_CAP:
		break;
	default:
		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
		return -EINVAL;
	}

	/*
	 * Read from the copied remote cap so that we take into account
	 * if capabilities were reduced during exchange.
	 */
	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
	if (ret)
		return ret;

	*rate = tb_dp_cap_get_rate(val);
	*lanes = tb_dp_cap_get_lanes(val);
	return 0;
}

static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				   int *max_down)
{
	struct tb_port *in = tunnel->src_port;
	int ret;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
	if (ret < 0)
		return ret;

	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
		*max_up = 0;
		*max_down = ret;
	} else {
		*max_up = ret;
		*max_down = 0;
	}

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		/*
		 * On USB4 routers check if the bandwidth allocation
		 * mode is enabled first and then read the bandwidth
		 * through those registers.
		 */
		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
							      consumed_down);
		if (ret < 0) {
			if (ret != -EOPNOTSUPP)
				return ret;
		} else if (!ret) {
			return 0;
		}

		/*
		 * Then see if the DPRX negotiation is ready and if yes
		 * return that bandwidth (it may be smaller than the
		 * reduced one). Otherwise return the remote (possibly
		 * reduced) caps.
		 */
		ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
		if (ret) {
			if (ret == -ETIMEDOUT)
				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
						     &rate, &lanes);
			if (ret)
				return ret;
		}
	} else if (sw->generation >= 2) {
		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
		if (ret)
			return ret;
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port))
		hop->initial_credits = sw->min_dp_aux_credits;
	else
		hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DP_AUX_PRIORITY;
	path->weight = TB_DP_AUX_WEIGHT;

	tb_path_for_each_hop(path, hop) {
		tb_dp_init_aux_credits(hop);
		if (pm_support)
			tb_init_pm_support(hop);
	}
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int nfc_credits;
		size_t max_dp_streams;

		tb_available_credits(port, &max_dp_streams);
		/*
		 * Read the number of currently allocated NFC credits
		 * from the lane adapter. Since we only use them for DP
		 * tunneling we can use that to figure out how many DP
		 * tunnels already go through the lane adapter.
		 */
		nfc_credits = port->config.nfc_credits &
				ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}

static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DP_VIDEO_PRIORITY;
	path->weight = TB_DP_VIDEO_WEIGHT;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dp_init_video_credits(hop);
		if (ret)
			return ret;
		if (pm_support)
			tb_init_pm_support(hop);
	}

	return 0;
}

static void tb_dp_dump(struct tb_tunnel *tunnel)
{
	struct tb_port *in, *out;
	u32 dp_cap, rate, lanes;

	in = tunnel->src_port;
	out = tunnel->dst_port;

	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
			 in->cap_adap + DP_LOCAL_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));

	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
			 out->cap_adap + DP_LOCAL_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));

	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
			 in->cap_adap + DP_REMOTE_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_dp_dump(tunnel);

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @link_nr: Preferred lane adapter when the link is not bonded
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;
	bool pm_support;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;
	pm_support = usb4_switch_version(in->sw) >= 2;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     link_nr, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, pm_support);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path, pm_support);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path, pm_support);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
	const struct tb_switch *sw = port->sw;
	int credits;

	credits = tb_available_credits(port, NULL);
	if (tb_acpi_may_tunnel_pcie())
		credits -= sw->max_pcie_credits;
	credits -= port->dma_credits;

	return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

		while (credits > available)
			credits--;

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DMA_PRIORITY;
	path->weight = TB_DMA_WEIGHT;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so can use all
	 * the credits (except the ones reserved for control traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	hop->in_port->dma_credits += tmp;

	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_ALL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DMA_PRIORITY;
	path->weight = TB_DMA_WEIGHT;
	path->clear_fc = true;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dma_reserve_credits(hop, credits);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		port->dma_credits -= hop->initial_credits;

		tb_port_dbg(port, "released %u DMA path credits\n",
			    hop->initial_credits);
	}
}

static void tb_dma_deinit_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		tb_dma_deinit_path(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	int credits;

	/* Ring 0 is reserved for control channel */
	if (WARN_ON(!receive_ring || !transmit_ring))
		return NULL;

	if (receive_ring > 0)
		npaths++;
	if (transmit_ring > 0)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
	tunnel->deinit = tb_dma_deinit;

	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);

	if (receive_ring > 0) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_rx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
			goto err_free;
		}
	}

	if (transmit_ring > 0) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_tx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
			goto err_free;
		}
	}

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel, if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take it into account here.
	 */
	*consumed_up = tunnel->allocated_up *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
	*consumed_down = tunnel->allocated_down *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;

	if (tb_port_get_link_generation(port) >= 4) {
		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
	}

	return 0;
}

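/*
 * For example, with PCIe tunneling enabled (pcie_weight = 1) an
 * allocation of 2000 Mb/s is reported above as 2000 * (2 + 1) / 2 =
 * 3000 Mb/s consumed.
 */
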
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
		return;
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		credits = sw->max_usb3_credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_USB3_PRIORITY;
	path->weight = TB_USB3_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop)
		tb_usb3_init_credits(hop);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on an USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	if (tunnel->deinit)
		tunnel->deinit(tunnel);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
 * @tunnel: Tunnel to check
 * @max_up: Maximum upstream bandwidth in Mb/s
 * @max_down: Maximum downstream bandwidth in Mb/s
 *
 * Returns maximum possible bandwidth this tunnel can go if not limited
 * by other bandwidth clients. If the tunnel does not support this
 * returns %-EOPNOTSUPP.
 */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->maximum_bandwidth)
		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);

	return -EOPNOTSUPP;
}

/**
 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
 * @tunnel: Tunnel to check
 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
 *		    stored here
 *
 * Returns the bandwidth allocated for the tunnel. This may be higher
 * than what the tunnel actually consumes.
 */
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->allocated_bandwidth)
		return tunnel->allocated_bandwidth(tunnel, allocated_up,
						   allocated_down);

	return -EOPNOTSUPP;
}

/**
 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
 * @tunnel: Tunnel whose bandwidth allocation to change
 * @alloc_up: New upstream bandwidth in Mb/s
 * @alloc_down: New downstream bandwidth in Mb/s
 *
 * Tries to change tunnel bandwidth allocation. If it succeeds, returns
 * %0 and updates @alloc_up and @alloc_down to what was actually
 * allocated (it may not be the same as passed originally). Returns
 * negative errno in case of failure.
 */
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->alloc_bandwidth)
		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);

	return -EOPNOTSUPP;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}

const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
{
	return tb_tunnel_names[tunnel->type];
}