GNU Linux-libre 6.1.24-gnu - drivers/thunderbolt/tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT      100 /* ms */
20
21 /**
22  * struct tb_cm - Simple Thunderbolt connection manager
23  * @tunnel_list: List of active tunnels
24  * @dp_resources: List of available DP resources for DP tunneling
25  * @hotplug_active: tb_handle_hotplug will stop processing plug
26  *                  events and exit if this is not set (it needs to
27  *                  acquire the lock one more time). Used to drain wq
28  *                  after cfg has been paused.
29  * @remove_work: Work used to remove any unplugged routers after
30  *               runtime resume
31  */
32 struct tb_cm {
33         struct list_head tunnel_list;
34         struct list_head dp_resources;
35         bool hotplug_active;
36         struct delayed_work remove_work;
37 };
38
39 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
40 {
41         return ((void *)tcm - sizeof(struct tb));
42 }
43
44 struct tb_hotplug_event {
45         struct work_struct work;
46         struct tb *tb;
47         u64 route;
48         u8 port;
49         bool unplug;
50 };
51
52 static void tb_handle_hotplug(struct work_struct *work);
53
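/* Allocate a hotplug event and queue it to the domain workqueue for tb_handle_hotplug() */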
54 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
55 {
56         struct tb_hotplug_event *ev;
57
58         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
59         if (!ev)
60                 return;
61
62         ev->tb = tb;
63         ev->route = route;
64         ev->port = port;
65         ev->unplug = unplug;
66         INIT_WORK(&ev->work, tb_handle_hotplug);
67         queue_work(tb->wq, &ev->work);
68 }
69
70 /* enumeration & hot plug handling */
71
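/*
 * Add all DP IN adapters of @sw that report an available DP resource to
 * the connection manager's list of DP resources.
 */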
72 static void tb_add_dp_resources(struct tb_switch *sw)
73 {
74         struct tb_cm *tcm = tb_priv(sw->tb);
75         struct tb_port *port;
76
77         tb_switch_for_each_port(sw, port) {
78                 if (!tb_port_is_dpin(port))
79                         continue;
80
81                 if (!tb_switch_query_dp_resource(sw, port))
82                         continue;
83
84                 list_add_tail(&port->list, &tcm->dp_resources);
85                 tb_port_dbg(port, "DP IN resource available\n");
86         }
87 }
88
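/*
 * Remove all DP resources that belong to @sw, or to routers below it,
 * from the connection manager's list of DP resources.
 */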
89 static void tb_remove_dp_resources(struct tb_switch *sw)
90 {
91         struct tb_cm *tcm = tb_priv(sw->tb);
92         struct tb_port *port, *tmp;
93
94         /* Clear the children's resources first */
95         tb_switch_for_each_port(sw, port) {
96                 if (tb_port_has_remote(port))
97                         tb_remove_dp_resources(port->remote->sw);
98         }
99
100         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
101                 if (port->sw == sw) {
102                         tb_port_dbg(port, "DP %s resource unavailable\n", tb_port_is_dpin(port) ? "IN" : "OUT");
103                         list_del_init(&port->list);
104                 }
105         }
106 }
107
108 static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
109 {
110         struct tb_cm *tcm = tb_priv(tb);
111         struct tb_port *p;
112
113         list_for_each_entry(p, &tcm->dp_resources, list) {
114                 if (p == port)
115                         return;
116         }
117
118         tb_port_dbg(port, "DP %s resource available after discovery\n",
119                     tb_port_is_dpin(port) ? "IN" : "OUT");
120         list_add_tail(&port->list, &tcm->dp_resources);
121 }
122
123 static void tb_discover_dp_resources(struct tb *tb)
124 {
125         struct tb_cm *tcm = tb_priv(tb);
126         struct tb_tunnel *tunnel;
127
128         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
129                 if (tb_tunnel_is_dp(tunnel))
130                         tb_discover_dp_resource(tb, tunnel->dst_port);
131         }
132 }
133
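/*
 * Walk the topology starting from @sw and add all DP, PCIe and USB3
 * tunnels set up by the boot firmware to @list.
 */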
134 static void tb_switch_discover_tunnels(struct tb_switch *sw,
135                                        struct list_head *list,
136                                        bool alloc_hopids)
137 {
138         struct tb *tb = sw->tb;
139         struct tb_port *port;
140
141         tb_switch_for_each_port(sw, port) {
142                 struct tb_tunnel *tunnel = NULL;
143
144                 switch (port->config.type) {
145                 case TB_TYPE_DP_HDMI_IN:
146                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
147                         /*
148                           * If a DP tunnel exists, change the TMU mode of the host
149                           * router's immediate children to HiFi for CL0s to work.
150                          */
151                         if (tunnel)
152                                 tb_switch_enable_tmu_1st_child(tb->root_switch,
153                                                 TB_SWITCH_TMU_RATE_HIFI);
154                         break;
155
156                 case TB_TYPE_PCIE_DOWN:
157                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
158                         break;
159
160                 case TB_TYPE_USB3_DOWN:
161                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
162                         break;
163
164                 default:
165                         break;
166                 }
167
168                 if (tunnel)
169                         list_add_tail(&tunnel->list, list);
170         }
171
172         tb_switch_for_each_port(sw, port) {
173                 if (tb_port_has_remote(port)) {
174                         tb_switch_discover_tunnels(port->remote->sw, list,
175                                                    alloc_hopids);
176                 }
177         }
178 }
179
180 static void tb_discover_tunnels(struct tb *tb)
181 {
182         struct tb_cm *tcm = tb_priv(tb);
183         struct tb_tunnel *tunnel;
184
185         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
186
187         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
188                 if (tb_tunnel_is_pci(tunnel)) {
189                         struct tb_switch *parent = tunnel->dst_port->sw;
190
191                         while (parent != tunnel->src_port->sw) {
192                                 parent->boot = true;
193                                 parent = tb_switch_parent(parent);
194                         }
195                 } else if (tb_tunnel_is_dp(tunnel)) {
196                         /* Keep the domain from powering down */
197                         pm_runtime_get_sync(&tunnel->src_port->sw->dev);
198                         pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
199                 }
200         }
201 }
202
203 static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
204 {
205         if (tb_switch_is_usb4(port->sw))
206                 return usb4_port_configure_xdomain(port, xd);
207         return tb_lc_configure_xdomain(port);
208 }
209
210 static void tb_port_unconfigure_xdomain(struct tb_port *port)
211 {
212         if (tb_switch_is_usb4(port->sw))
213                 usb4_port_unconfigure_xdomain(port);
214         else
215                 tb_lc_unconfigure_xdomain(port);
216
217         tb_port_enable(port->dual_link_port);
218 }
219
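/*
 * The link behind @port may lead to another host (XDomain connection).
 * If so, and if one is not already known for this route, create the
 * XDomain and add it to the bus.
 */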
220 static void tb_scan_xdomain(struct tb_port *port)
221 {
222         struct tb_switch *sw = port->sw;
223         struct tb *tb = sw->tb;
224         struct tb_xdomain *xd;
225         u64 route;
226
227         if (!tb_is_xdomain_enabled())
228                 return;
229
230         route = tb_downstream_route(port);
231         xd = tb_xdomain_find_by_route(tb, route);
232         if (xd) {
233                 tb_xdomain_put(xd);
234                 return;
235         }
236
237         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
238                               NULL);
239         if (xd) {
240                 tb_port_at(route, sw)->xdomain = xd;
241                 tb_port_configure_xdomain(port, xd);
242                 tb_xdomain_add(xd);
243         }
244 }
245
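/* Enable TMU of the router in the requested mode, reconfiguring it first if needed */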
246 static int tb_enable_tmu(struct tb_switch *sw)
247 {
248         int ret;
249
250         /* If it is already enabled in the correct mode, don't touch it */
251         if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
252                 return 0;
253
254         ret = tb_switch_tmu_disable(sw);
255         if (ret)
256                 return ret;
257
258         ret = tb_switch_tmu_post_time(sw);
259         if (ret)
260                 return ret;
261
262         return tb_switch_tmu_enable(sw);
263 }
264
265 /**
266  * tb_find_unused_port() - return the first inactive port on @sw
267  * @sw: Switch to find the port on
268  * @type: Port type to look for
269  */
270 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
271                                            enum tb_port_type type)
272 {
273         struct tb_port *port;
274
275         tb_switch_for_each_port(sw, port) {
276                 if (tb_is_upstream_port(port))
277                         continue;
278                 if (port->config.type != type)
279                         continue;
280                 if (!port->cap_adap)
281                         continue;
282                 if (tb_port_is_enabled(port))
283                         continue;
284                 return port;
285         }
286         return NULL;
287 }
288
289 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
290                                          const struct tb_port *port)
291 {
292         struct tb_port *down;
293
294         down = usb4_switch_map_usb3_down(sw, port);
295         if (down && !tb_usb3_port_is_enabled(down))
296                 return down;
297         return NULL;
298 }
299
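/*
 * Find an existing tunnel of @type whose source or destination adapter
 * matches @src_port or @dst_port. Returns NULL if there is no such
 * tunnel.
 */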
300 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
301                                         struct tb_port *src_port,
302                                         struct tb_port *dst_port)
303 {
304         struct tb_cm *tcm = tb_priv(tb);
305         struct tb_tunnel *tunnel;
306
307         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
308                 if (tunnel->type == type &&
309                     ((src_port && src_port == tunnel->src_port) ||
310                      (dst_port && dst_port == tunnel->dst_port))) {
311                         return tunnel;
312                 }
313         }
314
315         return NULL;
316 }
317
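/*
 * Find the USB3 tunnel that starts from the host router downstream USB3
 * adapter on the branch leading towards @src_port/@dst_port. Returns
 * NULL if both ports are on the host router or there is no such tunnel.
 */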
318 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
319                                                    struct tb_port *src_port,
320                                                    struct tb_port *dst_port)
321 {
322         struct tb_port *port, *usb3_down;
323         struct tb_switch *sw;
324
325         /* Pick the router that is deepest in the topology */
326         if (dst_port->sw->config.depth > src_port->sw->config.depth)
327                 sw = dst_port->sw;
328         else
329                 sw = src_port->sw;
330
331         /* Can't be the host router */
332         if (sw == tb->root_switch)
333                 return NULL;
334
335         /* Find the downstream USB4 port that leads to this router */
336         port = tb_port_at(tb_route(sw), tb->root_switch);
337         /* Find the corresponding host router USB3 downstream port */
338         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
339         if (!usb3_down)
340                 return NULL;
341
342         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
343 }
344
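/*
 * Calculate the bandwidth (in Mb/s) still available for new tunnels on
 * the path between @src_port and @dst_port. Existing DP and USB3
 * tunnels, and a 10% guard band per link, are accounted for.
 */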
345 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
346         struct tb_port *dst_port, int *available_up, int *available_down)
347 {
348         int usb3_consumed_up, usb3_consumed_down, ret;
349         struct tb_cm *tcm = tb_priv(tb);
350         struct tb_tunnel *tunnel;
351         struct tb_port *port;
352
353         tb_port_dbg(dst_port, "calculating available bandwidth\n");
354
355         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
356         if (tunnel) {
357                 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
358                                                    &usb3_consumed_down);
359                 if (ret)
360                         return ret;
361         } else {
362                 usb3_consumed_up = 0;
363                 usb3_consumed_down = 0;
364         }
365
366         *available_up = *available_down = 40000;
367
368         /* Find the minimum available bandwidth over all links */
369         tb_for_each_port_on_path(src_port, dst_port, port) {
370                 int link_speed, link_width, up_bw, down_bw;
371
372                 if (!tb_port_is_null(port))
373                         continue;
374
375                 if (tb_is_upstream_port(port)) {
376                         link_speed = port->sw->link_speed;
377                 } else {
378                         link_speed = tb_port_get_link_speed(port);
379                         if (link_speed < 0)
380                                 return link_speed;
381                 }
382
383                 link_width = port->bonded ? 2 : 1;
384
385                 up_bw = link_speed * link_width * 1000; /* Mb/s */
386                 /* Leave 10% guard band */
387                 up_bw -= up_bw / 10;
388                 down_bw = up_bw;
389
390                 tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
391
392                 /*
393                  * Find all DP tunnels that cross the port and subtract
394                  * their consumed bandwidth from the available bandwidth.
395                  */
396                 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
397                         int dp_consumed_up, dp_consumed_down;
398
399                         if (!tb_tunnel_is_dp(tunnel))
400                                 continue;
401
402                         if (!tb_tunnel_port_on_path(tunnel, port))
403                                 continue;
404
405                         ret = tb_tunnel_consumed_bandwidth(tunnel,
406                                                            &dp_consumed_up,
407                                                            &dp_consumed_down);
408                         if (ret)
409                                 return ret;
410
411                         up_bw -= dp_consumed_up;
412                         down_bw -= dp_consumed_down;
413                 }
414
415                 /*
416                  * If USB3 is tunneled from the host router down to the
417                  * branch leading to the port, we need to take the USB3
418                  * consumed bandwidth into account regardless of whether
419                  * it actually crosses the port.
420                  */
421                 up_bw -= usb3_consumed_up;
422                 down_bw -= usb3_consumed_down;
423
424                 if (up_bw < *available_up)
425                         *available_up = up_bw;
426                 if (down_bw < *available_down)
427                         *available_down = down_bw;
428         }
429
430         if (*available_up < 0)
431                 *available_up = 0;
432         if (*available_down < 0)
433                 *available_down = 0;
434
435         return 0;
436 }
437
438 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
439                                             struct tb_port *src_port,
440                                             struct tb_port *dst_port)
441 {
442         struct tb_tunnel *tunnel;
443
444         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
445         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
446 }
447
448 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
449                                       struct tb_port *dst_port)
450 {
451         int ret, available_up, available_down;
452         struct tb_tunnel *tunnel;
453
454         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
455         if (!tunnel)
456                 return;
457
458         tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
459
460         /*
461          * Calculate available bandwidth for the first hop USB3 tunnel.
462          * That determines the whole USB3 bandwidth for this branch.
463          */
464         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
465                                      &available_up, &available_down);
466         if (ret) {
467                 tb_warn(tb, "failed to calculate available bandwidth\n");
468                 return;
469         }
470
471         tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
472                available_up, available_down);
473
474         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
475 }
476
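/*
 * Set up a USB3 tunnel from the parent of @sw down to @sw, provided the
 * needed adapters are available and USB3 tunneling is allowed.
 */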
477 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
478 {
479         struct tb_switch *parent = tb_switch_parent(sw);
480         int ret, available_up, available_down;
481         struct tb_port *up, *down, *port;
482         struct tb_cm *tcm = tb_priv(tb);
483         struct tb_tunnel *tunnel;
484
485         if (!tb_acpi_may_tunnel_usb3()) {
486                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
487                 return 0;
488         }
489
490         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
491         if (!up)
492                 return 0;
493
494         if (!sw->link_usb4)
495                 return 0;
496
497         /*
498          * Look up the available down port. Since we are chaining, it
499          * should be found right above this switch.
500          */
501         port = tb_port_at(tb_route(sw), parent);
502         down = tb_find_usb3_down(parent, port);
503         if (!down)
504                 return 0;
505
506         if (tb_route(parent)) {
507                 struct tb_port *parent_up;
508                 /*
509                  * Check first that the parent switch has its upstream USB3
510                  * port enabled. Otherwise the chain is not complete and
511                  * there is no point setting up a new tunnel.
512                  */
513                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
514                 if (!parent_up || !tb_port_is_enabled(parent_up))
515                         return 0;
516
517                 /* Make all unused bandwidth available for the new tunnel */
518                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
519                 if (ret)
520                         return ret;
521         }
522
523         ret = tb_available_bandwidth(tb, down, up, &available_up,
524                                      &available_down);
525         if (ret)
526                 goto err_reclaim;
527
528         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
529                     available_up, available_down);
530
531         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
532                                       available_down);
533         if (!tunnel) {
534                 ret = -ENOMEM;
535                 goto err_reclaim;
536         }
537
538         if (tb_tunnel_activate(tunnel)) {
539                 tb_port_info(up,
540                              "USB3 tunnel activation failed, aborting\n");
541                 ret = -EIO;
542                 goto err_free;
543         }
544
545         list_add_tail(&tunnel->list, &tcm->tunnel_list);
546         if (tb_route(parent))
547                 tb_reclaim_usb3_bandwidth(tb, down, up);
548
549         return 0;
550
551 err_free:
552         tb_tunnel_free(tunnel);
553 err_reclaim:
554         if (tb_route(parent))
555                 tb_reclaim_usb3_bandwidth(tb, down, up);
556
557         return ret;
558 }
559
560 static int tb_create_usb3_tunnels(struct tb_switch *sw)
561 {
562         struct tb_port *port;
563         int ret;
564
565         if (!tb_acpi_may_tunnel_usb3())
566                 return 0;
567
568         if (tb_route(sw)) {
569                 ret = tb_tunnel_usb3(sw->tb, sw);
570                 if (ret)
571                         return ret;
572         }
573
574         tb_switch_for_each_port(sw, port) {
575                 if (!tb_port_has_remote(port))
576                         continue;
577                 ret = tb_create_usb3_tunnels(port->remote->sw);
578                 if (ret)
579                         return ret;
580         }
581
582         return 0;
583 }
584
585 static void tb_scan_port(struct tb_port *port);
586
587 /*
588  * tb_scan_switch() - scan for and initialize downstream switches
589  */
590 static void tb_scan_switch(struct tb_switch *sw)
591 {
592         struct tb_port *port;
593
594         pm_runtime_get_sync(&sw->dev);
595
596         tb_switch_for_each_port(sw, port)
597                 tb_scan_port(port);
598
599         pm_runtime_mark_last_busy(&sw->dev);
600         pm_runtime_put_autosuspend(&sw->dev);
601 }
602
603 /*
604  * tb_scan_port() - check for and initialize switches below port
605  */
606 static void tb_scan_port(struct tb_port *port)
607 {
608         struct tb_cm *tcm = tb_priv(port->sw->tb);
609         struct tb_port *upstream_port;
610         struct tb_switch *sw;
611         int ret;
612
613         if (tb_is_upstream_port(port))
614                 return;
615
616         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
617             !tb_dp_port_is_enabled(port)) {
618                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
619                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
620                                  false);
621                 return;
622         }
623
624         if (port->config.type != TB_TYPE_PORT)
625                 return;
626         if (port->dual_link_port && port->link_nr)
627                 return; /*
628                          * Downstream switch is reachable through two ports.
629                          * Only scan on the primary port (link_nr == 0).
630                          */
631
632         if (port->usb4)
633                 pm_runtime_get_sync(&port->usb4->dev);
634
635         if (tb_wait_for_port(port, false) <= 0)
636                 goto out_rpm_put;
637         if (port->remote) {
638                 tb_port_dbg(port, "port already has a remote\n");
639                 goto out_rpm_put;
640         }
641
642         tb_retimer_scan(port, true);
643
644         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
645                              tb_downstream_route(port));
646         if (IS_ERR(sw)) {
647                 /*
648                  * If there is an error accessing the connected switch, it
649                  * may be connected to another domain. Also we allow the other
650                  * domain to be connected to a switch at the maximum depth.
651                  */
652                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
653                         tb_scan_xdomain(port);
654                 goto out_rpm_put;
655         }
656
657         if (tb_switch_configure(sw)) {
658                 tb_switch_put(sw);
659                 goto out_rpm_put;
660         }
661
662         /*
663          * If there was previously another domain connected remove it
664          * first.
665          */
666         if (port->xdomain) {
667                 tb_xdomain_remove(port->xdomain);
668                 tb_port_unconfigure_xdomain(port);
669                 port->xdomain = NULL;
670         }
671
672         /*
673          * Do not send uevents until we have discovered all existing
674          * tunnels and know which switches were authorized already by
675          * the boot firmware.
676          */
677         if (!tcm->hotplug_active)
678                 dev_set_uevent_suppress(&sw->dev, true);
679
680         /*
681          * At the moment we support runtime PM only for Thunderbolt 2
682          * and beyond (devices with LC).
683          */
684         sw->rpm = sw->generation > 1;
685
686         if (tb_switch_add(sw)) {
687                 tb_switch_put(sw);
688                 goto out_rpm_put;
689         }
690
691         /* Link the switches using both links if available */
692         upstream_port = tb_upstream_port(sw);
693         port->remote = upstream_port;
694         upstream_port->remote = port;
695         if (port->dual_link_port && upstream_port->dual_link_port) {
696                 port->dual_link_port->remote = upstream_port->dual_link_port;
697                 upstream_port->dual_link_port->remote = port->dual_link_port;
698         }
699
700         /* Enable lane bonding if supported */
701         tb_switch_lane_bonding_enable(sw);
702         /* Set the link configured */
703         tb_switch_configure_link(sw);
704         /*
705          * CL0s and CL1 are enabled and supported together.
706          * Silently ignore CLx enabling in case CLx is not supported.
707          */
708         ret = tb_switch_enable_clx(sw, TB_CL1);
709         if (ret && ret != -EOPNOTSUPP)
710                 tb_sw_warn(sw, "failed to enable %s on upstream port\n",
711                            tb_switch_clx_name(TB_CL1));
712
713         if (tb_switch_is_clx_enabled(sw, TB_CL1))
714                 /*
715                  * To support the highest CLx state, we set the router's
716                  * TMU to Normal-Uni mode.
717                  */
718                 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
719         else
720                 /* If CLx is disabled, configure the router's TMU to HiFi-Bidir mode */
721                 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
722
723         if (tb_enable_tmu(sw))
724                 tb_sw_warn(sw, "failed to enable TMU\n");
725
726         /* Scan upstream retimers */
727         tb_retimer_scan(upstream_port, true);
728
729         /*
730          * Create USB 3.x tunnels only when the switch is plugged into the
731          * domain. This is because we also scan the domain during discovery
732          * and want to discover existing USB 3.x tunnels before we create
733          * any new ones.
734          */
735         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
736                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
737
738         tb_add_dp_resources(sw);
739         tb_scan_switch(sw);
740
741 out_rpm_put:
742         if (port->usb4) {
743                 pm_runtime_mark_last_busy(&port->usb4->dev);
744                 pm_runtime_put_autosuspend(&port->usb4->dev);
745         }
746 }
747
748 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
749 {
750         struct tb_port *src_port, *dst_port;
751         struct tb *tb;
752
753         if (!tunnel)
754                 return;
755
756         tb_tunnel_deactivate(tunnel);
757         list_del(&tunnel->list);
758
759         tb = tunnel->tb;
760         src_port = tunnel->src_port;
761         dst_port = tunnel->dst_port;
762
763         switch (tunnel->type) {
764         case TB_TUNNEL_DP:
765                 /*
766                  * In case of a DP tunnel, make sure the DP IN resource is
767                  * deallocated properly.
768                  */
769                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
770                 /* Now we can allow the domain to runtime suspend again */
771                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
772                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
773                 pm_runtime_mark_last_busy(&src_port->sw->dev);
774                 pm_runtime_put_autosuspend(&src_port->sw->dev);
775                 fallthrough;
776
777         case TB_TUNNEL_USB3:
778                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
779                 break;
780
781         default:
782                 /*
783                  * PCIe and DMA tunnels do not consume guaranteed
784                  * bandwidth.
785                  */
786                 break;
787         }
788
789         tb_tunnel_free(tunnel);
790 }
791
792 /*
793  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
794  */
795 static void tb_free_invalid_tunnels(struct tb *tb)
796 {
797         struct tb_cm *tcm = tb_priv(tb);
798         struct tb_tunnel *tunnel;
799         struct tb_tunnel *n;
800
801         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
802                 if (tb_tunnel_is_invalid(tunnel))
803                         tb_deactivate_and_free_tunnel(tunnel);
804         }
805 }
806
807 /*
808  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
809  */
810 static void tb_free_unplugged_children(struct tb_switch *sw)
811 {
812         struct tb_port *port;
813
814         tb_switch_for_each_port(sw, port) {
815                 if (!tb_port_has_remote(port))
816                         continue;
817
818                 if (port->remote->sw->is_unplugged) {
819                         tb_retimer_remove_all(port);
820                         tb_remove_dp_resources(port->remote->sw);
821                         tb_switch_unconfigure_link(port->remote->sw);
822                         tb_switch_lane_bonding_disable(port->remote->sw);
823                         tb_switch_remove(port->remote->sw);
824                         port->remote = NULL;
825                         if (port->dual_link_port)
826                                 port->dual_link_port->remote = NULL;
827                 } else {
828                         tb_free_unplugged_children(port->remote->sw);
829                 }
830         }
831 }
832
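/*
 * Return the PCIe downstream adapter of @sw that is paired with @port.
 * Falls back to the first unused PCIe downstream adapter if there is no
 * fixed mapping or the mapped adapter is already in use.
 */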
833 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
834                                          const struct tb_port *port)
835 {
836         struct tb_port *down = NULL;
837
838         /*
839          * To keep plugged-in devices consistently in the same PCIe
840          * hierarchy, map the switch downstream PCIe ports here.
841          */
842         if (tb_switch_is_usb4(sw)) {
843                 down = usb4_switch_map_pcie_down(sw, port);
844         } else if (!tb_route(sw)) {
845                 int phy_port = tb_phy_port_from_link(port->port);
846                 int index;
847
848                 /*
849                  * Hard-coded Thunderbolt port to PCIe down port mapping
850                  * per controller.
851                  */
852                 if (tb_switch_is_cactus_ridge(sw) ||
853                     tb_switch_is_alpine_ridge(sw))
854                         index = !phy_port ? 6 : 7;
855                 else if (tb_switch_is_falcon_ridge(sw))
856                         index = !phy_port ? 6 : 8;
857                 else if (tb_switch_is_titan_ridge(sw))
858                         index = !phy_port ? 8 : 9;
859                 else
860                         goto out;
861
862                 /* Validate the hard-coding */
863                 if (WARN_ON(index > sw->config.max_port_number))
864                         goto out;
865
866                 down = &sw->ports[index];
867         }
868
869         if (down) {
870                 if (WARN_ON(!tb_port_is_pcie_down(down)))
871                         goto out;
872                 if (tb_pci_port_is_enabled(down))
873                         goto out;
874
875                 return down;
876         }
877
878 out:
879         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
880 }
881
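/*
 * Find an inactive DP OUT adapter that can be paired with DP IN adapter
 * @in. When possible the DP OUT is picked from the topology below the
 * same host router downstream port as @in.
 */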
882 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
883 {
884         struct tb_port *host_port, *port;
885         struct tb_cm *tcm = tb_priv(tb);
886
887         host_port = tb_route(in->sw) ?
888                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
889
890         list_for_each_entry(port, &tcm->dp_resources, list) {
891                 if (!tb_port_is_dpout(port))
892                         continue;
893
894                 if (tb_port_is_enabled(port)) {
895                         tb_port_dbg(port, "in use\n");
896                         continue;
897                 }
898
899                 tb_port_dbg(port, "DP OUT available\n");
900
901                 /*
902                  * Keep the DP tunnel under the topology starting from
903                  * the same host router downstream port.
904                  */
905                 if (host_port && tb_route(port->sw)) {
906                         struct tb_port *p;
907
908                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
909                         if (p != host_port)
910                                 continue;
911                 }
912
913                 return port;
914         }
915
916         return NULL;
917 }
918
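/*
 * Find an inactive DP IN and DP OUT adapter pair and establish a DP
 * tunnel between them, limited to the bandwidth that is currently
 * available on the path.
 */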
919 static void tb_tunnel_dp(struct tb *tb)
920 {
921         int available_up, available_down, ret, link_nr;
922         struct tb_cm *tcm = tb_priv(tb);
923         struct tb_port *port, *in, *out;
924         struct tb_tunnel *tunnel;
925
926         if (!tb_acpi_may_tunnel_dp()) {
927                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
928                 return;
929         }
930
931         /*
932          * Find pair of inactive DP IN and DP OUT adapters and then
933          * establish a DP tunnel between them.
934          */
935         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
936
937         in = NULL;
938         out = NULL;
939         list_for_each_entry(port, &tcm->dp_resources, list) {
940                 if (!tb_port_is_dpin(port))
941                         continue;
942
943                 if (tb_port_is_enabled(port)) {
944                         tb_port_dbg(port, "in use\n");
945                         continue;
946                 }
947
948                 tb_port_dbg(port, "DP IN available\n");
949
950                 out = tb_find_dp_out(tb, port);
951                 if (out) {
952                         in = port;
953                         break;
954                 }
955         }
956
957         if (!in) {
958                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
959                 return;
960         }
961         if (!out) {
962                 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
963                 return;
964         }
965
966         /*
967          * This is only applicable to links that are not bonded (so
968          * when Thunderbolt 1 hardware is involved somewhere in the
969          * topology). For these try to share the DP bandwidth between
970          * the two lanes.
971          */
972         link_nr = 1;
973         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
974                 if (tb_tunnel_is_dp(tunnel)) {
975                         link_nr = 0;
976                         break;
977                 }
978         }
979
980         /*
981          * DP stream needs the domain to be active so runtime resume
982          * both ends of the tunnel.
983          *
984          * This should bring the routers in the middle active as well
985          * and keeps the domain from runtime suspending while the DP
986          * tunnel is active.
987          */
988         pm_runtime_get_sync(&in->sw->dev);
989         pm_runtime_get_sync(&out->sw->dev);
990
991         if (tb_switch_alloc_dp_resource(in->sw, in)) {
992                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
993                 goto err_rpm_put;
994         }
995
996         /* Make all unused USB3 bandwidth available for the new DP tunnel */
997         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
998         if (ret) {
999                 tb_warn(tb, "failed to release unused bandwidth\n");
1000                 goto err_dealloc_dp;
1001         }
1002
1003         ret = tb_available_bandwidth(tb, in, out, &available_up,
1004                                      &available_down);
1005         if (ret)
1006                 goto err_reclaim;
1007
1008         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
1009                available_up, available_down);
1010
1011         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
1012                                     available_down);
1013         if (!tunnel) {
1014                 tb_port_dbg(out, "could not allocate DP tunnel\n");
1015                 goto err_reclaim;
1016         }
1017
1018         if (tb_tunnel_activate(tunnel)) {
1019                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
1020                 goto err_free;
1021         }
1022
1023         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1024         tb_reclaim_usb3_bandwidth(tb, in, out);
1025         /*
1026          * If a DP tunnel exists, change the TMU mode of the host router's
1027          * immediate children to HiFi for CL0s to work.
1028          */
1029         tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
1030
1031         return;
1032
1033 err_free:
1034         tb_tunnel_free(tunnel);
1035 err_reclaim:
1036         tb_reclaim_usb3_bandwidth(tb, in, out);
1037 err_dealloc_dp:
1038         tb_switch_dealloc_dp_resource(in->sw, in);
1039 err_rpm_put:
1040         pm_runtime_mark_last_busy(&out->sw->dev);
1041         pm_runtime_put_autosuspend(&out->sw->dev);
1042         pm_runtime_mark_last_busy(&in->sw->dev);
1043         pm_runtime_put_autosuspend(&in->sw->dev);
1044 }
1045
1046 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
1047 {
1048         struct tb_port *in, *out;
1049         struct tb_tunnel *tunnel;
1050
1051         if (tb_port_is_dpin(port)) {
1052                 tb_port_dbg(port, "DP IN resource unavailable\n");
1053                 in = port;
1054                 out = NULL;
1055         } else {
1056                 tb_port_dbg(port, "DP OUT resource unavailable\n");
1057                 in = NULL;
1058                 out = port;
1059         }
1060
1061         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
1062         tb_deactivate_and_free_tunnel(tunnel);
1063         list_del_init(&port->list);
1064
1065         /*
1066          * See if there is another DP OUT port that can be used to
1067          * create another tunnel.
1068          */
1069         tb_tunnel_dp(tb);
1070 }
1071
1072 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1073 {
1074         struct tb_cm *tcm = tb_priv(tb);
1075         struct tb_port *p;
1076
1077         if (tb_port_is_enabled(port))
1078                 return;
1079
1080         list_for_each_entry(p, &tcm->dp_resources, list) {
1081                 if (p == port)
1082                         return;
1083         }
1084
1085         tb_port_dbg(port, "DP %s resource available\n",
1086                     tb_port_is_dpin(port) ? "IN" : "OUT");
1087         list_add_tail(&port->list, &tcm->dp_resources);
1088
1089         /* Look for suitable DP IN <-> DP OUT pairs now */
1090         tb_tunnel_dp(tb);
1091 }
1092
1093 static void tb_disconnect_and_release_dp(struct tb *tb)
1094 {
1095         struct tb_cm *tcm = tb_priv(tb);
1096         struct tb_tunnel *tunnel, *n;
1097
1098         /*
1099          * Tear down all DP tunnels and release their resources. They
1100          * will be re-established after resume based on plug events.
1101          */
1102         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1103                 if (tb_tunnel_is_dp(tunnel))
1104                         tb_deactivate_and_free_tunnel(tunnel);
1105         }
1106
1107         while (!list_empty(&tcm->dp_resources)) {
1108                 struct tb_port *port;
1109
1110                 port = list_first_entry(&tcm->dp_resources,
1111                                         struct tb_port, list);
1112                 list_del_init(&port->list);
1113         }
1114 }
1115
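/* Tear down the PCIe tunnel that ends at router @sw */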
1116 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1117 {
1118         struct tb_tunnel *tunnel;
1119         struct tb_port *up;
1120
1121         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1122         if (WARN_ON(!up))
1123                 return -ENODEV;
1124
1125         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1126         if (WARN_ON(!tunnel))
1127                 return -ENODEV;
1128
1129         tb_switch_xhci_disconnect(sw);
1130
1131         tb_tunnel_deactivate(tunnel);
1132         list_del(&tunnel->list);
1133         tb_tunnel_free(tunnel);
1134         return 0;
1135 }
1136
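/*
 * Set up a PCIe tunnel from the parent of @sw down to @sw, provided the
 * needed adapters are available.
 */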
1137 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1138 {
1139         struct tb_port *up, *down, *port;
1140         struct tb_cm *tcm = tb_priv(tb);
1141         struct tb_switch *parent_sw;
1142         struct tb_tunnel *tunnel;
1143
1144         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1145         if (!up)
1146                 return 0;
1147
1148         /*
1149          * Look up the available down port. Since we are chaining, it
1150          * should be found right above this switch.
1151          */
1152         parent_sw = tb_to_switch(sw->dev.parent);
1153         port = tb_port_at(tb_route(sw), parent_sw);
1154         down = tb_find_pcie_down(parent_sw, port);
1155         if (!down)
1156                 return 0;
1157
1158         tunnel = tb_tunnel_alloc_pci(tb, up, down);
1159         if (!tunnel)
1160                 return -ENOMEM;
1161
1162         if (tb_tunnel_activate(tunnel)) {
1163                 tb_port_info(up,
1164                              "PCIe tunnel activation failed, aborting\n");
1165                 tb_tunnel_free(tunnel);
1166                 return -EIO;
1167         }
1168
1169         /*
1170          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1171          * here.
1172          */
1173         if (tb_switch_pcie_l1_enable(sw))
1174                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1175
1176         if (tb_switch_xhci_connect(sw))
1177                 tb_sw_warn(sw, "failed to connect xHCI\n");
1178
1179         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1180         return 0;
1181 }
1182
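/*
 * Establish a DMA tunnel between the host NHI adapter and the XDomain
 * @xd using the given transmit/receive paths and rings.
 */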
1183 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1184                                     int transmit_path, int transmit_ring,
1185                                     int receive_path, int receive_ring)
1186 {
1187         struct tb_cm *tcm = tb_priv(tb);
1188         struct tb_port *nhi_port, *dst_port;
1189         struct tb_tunnel *tunnel;
1190         struct tb_switch *sw;
1191
1192         sw = tb_to_switch(xd->dev.parent);
1193         dst_port = tb_port_at(xd->route, sw);
1194         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1195
1196         mutex_lock(&tb->lock);
1197         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1198                                      transmit_ring, receive_path, receive_ring);
1199         if (!tunnel) {
1200                 mutex_unlock(&tb->lock);
1201                 return -ENOMEM;
1202         }
1203
1204         if (tb_tunnel_activate(tunnel)) {
1205                 tb_port_info(nhi_port,
1206                              "DMA tunnel activation failed, aborting\n");
1207                 tb_tunnel_free(tunnel);
1208                 mutex_unlock(&tb->lock);
1209                 return -EIO;
1210         }
1211
1212         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1213         mutex_unlock(&tb->lock);
1214         return 0;
1215 }
1216
1217 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1218                                           int transmit_path, int transmit_ring,
1219                                           int receive_path, int receive_ring)
1220 {
1221         struct tb_cm *tcm = tb_priv(tb);
1222         struct tb_port *nhi_port, *dst_port;
1223         struct tb_tunnel *tunnel, *n;
1224         struct tb_switch *sw;
1225
1226         sw = tb_to_switch(xd->dev.parent);
1227         dst_port = tb_port_at(xd->route, sw);
1228         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1229
1230         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1231                 if (!tb_tunnel_is_dma(tunnel))
1232                         continue;
1233                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1234                         continue;
1235
1236                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1237                                         receive_path, receive_ring))
1238                         tb_deactivate_and_free_tunnel(tunnel);
1239         }
1240 }
1241
1242 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1243                                        int transmit_path, int transmit_ring,
1244                                        int receive_path, int receive_ring)
1245 {
1246         if (!xd->is_unplugged) {
1247                 mutex_lock(&tb->lock);
1248                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1249                                               transmit_ring, receive_path,
1250                                               receive_ring);
1251                 mutex_unlock(&tb->lock);
1252         }
1253         return 0;
1254 }
1255
1256 /* hotplug handling */
1257
1258 /*
1259  * tb_handle_hotplug() - handle hotplug event
1260  *
1261  * Executes on tb->wq.
1262  */
1263 static void tb_handle_hotplug(struct work_struct *work)
1264 {
1265         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1266         struct tb *tb = ev->tb;
1267         struct tb_cm *tcm = tb_priv(tb);
1268         struct tb_switch *sw;
1269         struct tb_port *port;
1270
1271         /* Bring the domain back from sleep if it was suspended */
1272         pm_runtime_get_sync(&tb->dev);
1273
1274         mutex_lock(&tb->lock);
1275         if (!tcm->hotplug_active)
1276                 goto out; /* during init, suspend or shutdown */
1277
1278         sw = tb_switch_find_by_route(tb, ev->route);
1279         if (!sw) {
1280                 tb_warn(tb,
1281                         "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1282                         ev->route, ev->port, ev->unplug);
1283                 goto out;
1284         }
1285         if (ev->port > sw->config.max_port_number) {
1286                 tb_warn(tb,
1287                         "hotplug event from non existent port %llx:%x (unplug: %d)\n",
1288                         ev->route, ev->port, ev->unplug);
1289                 goto put_sw;
1290         }
1291         port = &sw->ports[ev->port];
1292         if (tb_is_upstream_port(port)) {
1293                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1294                        ev->route, ev->port, ev->unplug);
1295                 goto put_sw;
1296         }
1297
1298         pm_runtime_get_sync(&sw->dev);
1299
1300         if (ev->unplug) {
1301                 tb_retimer_remove_all(port);
1302
1303                 if (tb_port_has_remote(port)) {
1304                         tb_port_dbg(port, "switch unplugged\n");
1305                         tb_sw_set_unplugged(port->remote->sw);
1306                         tb_free_invalid_tunnels(tb);
1307                         tb_remove_dp_resources(port->remote->sw);
1308                         tb_switch_tmu_disable(port->remote->sw);
1309                         tb_switch_unconfigure_link(port->remote->sw);
1310                         tb_switch_lane_bonding_disable(port->remote->sw);
1311                         tb_switch_remove(port->remote->sw);
1312                         port->remote = NULL;
1313                         if (port->dual_link_port)
1314                                 port->dual_link_port->remote = NULL;
1315                         /* Maybe we can create another DP tunnel */
1316                         tb_tunnel_dp(tb);
1317                 } else if (port->xdomain) {
1318                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1319
1320                         tb_port_dbg(port, "xdomain unplugged\n");
1321                         /*
1322                          * Service drivers are unbound during
1323                          * tb_xdomain_remove() so setting XDomain as
1324                          * unplugged here prevents deadlock if they call
1325                          * tb_xdomain_disable_paths(). We will tear down
1326                          * all the tunnels below.
1327                          */
1328                         xd->is_unplugged = true;
1329                         tb_xdomain_remove(xd);
1330                         port->xdomain = NULL;
1331                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
1332                         tb_xdomain_put(xd);
1333                         tb_port_unconfigure_xdomain(port);
1334                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1335                         tb_dp_resource_unavailable(tb, port);
1336                 } else if (!port->port) {
1337                         tb_sw_dbg(sw, "xHCI disconnect request\n");
1338                         tb_switch_xhci_disconnect(sw);
1339                 } else {
1340                         tb_port_dbg(port,
1341                                    "got unplug event for disconnected port, ignoring\n");
1342                 }
1343         } else if (port->remote) {
1344                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1345         } else if (!port->port && sw->authorized) {
1346                 tb_sw_dbg(sw, "xHCI connect request\n");
1347                 tb_switch_xhci_connect(sw);
1348         } else {
1349                 if (tb_port_is_null(port)) {
1350                         tb_port_dbg(port, "hotplug: scanning\n");
1351                         tb_scan_port(port);
1352                         if (!port->remote)
1353                                 tb_port_dbg(port, "hotplug: no switch found\n");
1354                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1355                         tb_dp_resource_available(tb, port);
1356                 }
1357         }
1358
1359         pm_runtime_mark_last_busy(&sw->dev);
1360         pm_runtime_put_autosuspend(&sw->dev);
1361
1362 put_sw:
1363         tb_switch_put(sw);
1364 out:
1365         mutex_unlock(&tb->lock);
1366
1367         pm_runtime_mark_last_busy(&tb->dev);
1368         pm_runtime_put_autosuspend(&tb->dev);
1369
1370         kfree(ev);
1371 }
1372
1373 /*
1374  * tb_handle_event() - callback function for the control channel
1375  *
1376  * Delegates to tb_handle_hotplug.
1377  */
1378 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1379                             const void *buf, size_t size)
1380 {
1381         const struct cfg_event_pkg *pkg = buf;
1382         u64 route;
1383
1384         if (type != TB_CFG_PKG_EVENT) {
1385                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1386                 return;
1387         }
1388
1389         route = tb_cfg_get_route(&pkg->header);
1390
1391         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1392                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1393                         pkg->port);
1394         }
1395
1396         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1397 }
1398
1399 static void tb_stop(struct tb *tb)
1400 {
1401         struct tb_cm *tcm = tb_priv(tb);
1402         struct tb_tunnel *tunnel;
1403         struct tb_tunnel *n;
1404
1405         cancel_delayed_work(&tcm->remove_work);
1406         /* tunnels are only present after everything has been initialized */
1407         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1408                 /*
1409                  * DMA tunnels require the driver to be functional so we
1410                  * tear them down. Other protocol tunnels can be left
1411                  * intact.
1412                  */
1413                 if (tb_tunnel_is_dma(tunnel))
1414                         tb_tunnel_deactivate(tunnel);
1415                 tb_tunnel_free(tunnel);
1416         }
1417         tb_switch_remove(tb->root_switch);
1418         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1419 }
1420
1421 static int tb_scan_finalize_switch(struct device *dev, void *data)
1422 {
1423         if (tb_is_switch(dev)) {
1424                 struct tb_switch *sw = tb_to_switch(dev);
1425
1426                 /*
1427                  * If we found that the switch was already set up by the
1428                  * boot firmware, mark it as authorized now before we
1429                  * send the uevent to userspace.
1430                  */
1431                 if (sw->boot)
1432                         sw->authorized = 1;
1433
1434                 dev_set_uevent_suppress(dev, false);
1435                 kobject_uevent(&dev->kobj, KOBJ_ADD);
1436                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1437         }
1438
1439         return 0;
1440 }
1441
1442 static int tb_start(struct tb *tb)
1443 {
1444         struct tb_cm *tcm = tb_priv(tb);
1445         int ret;
1446
1447         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1448         if (IS_ERR(tb->root_switch))
1449                 return PTR_ERR(tb->root_switch);
1450
1451         /*
1452          * ICM firmware upgrade needs running firmware, and in native
1453          * mode that is not available, so disable firmware upgrade of the
1454          * root switch.
1455          *
1456          * However, USB4 routers support NVM firmware upgrade if they
1457          * implement the necessary router operations.
1458          */
1459         tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
1460         /* All USB4 routers support runtime PM */
1461         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1462
1463         ret = tb_switch_configure(tb->root_switch);
1464         if (ret) {
1465                 tb_switch_put(tb->root_switch);
1466                 return ret;
1467         }
1468
1469         /* Announce the switch to the world */
1470         ret = tb_switch_add(tb->root_switch);
1471         if (ret) {
1472                 tb_switch_put(tb->root_switch);
1473                 return ret;
1474         }
1475
1476         /*
1477          * To support the highest CLx state, we set the host router's TMU
1478          * to Normal mode.
1479          */
1480         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
1481                                 false);
1482         /* Enable TMU if it is off */
1483         tb_switch_tmu_enable(tb->root_switch);
1484         /* Full scan to discover devices added before the driver was loaded. */
1485         tb_scan_switch(tb->root_switch);
1486         /* Find out tunnels created by the boot firmware */
1487         tb_discover_tunnels(tb);
1488         /* Add DP resources from the DP tunnels created by the boot firmware */
1489         tb_discover_dp_resources(tb);
1490         /*
1491          * If the boot firmware did not create USB 3.x tunnels, create
1492          * them now for the whole topology.
1493          */
1494         tb_create_usb3_tunnels(tb->root_switch);
1495         /* Add DP IN resources for the root switch */
1496         tb_add_dp_resources(tb->root_switch);
1497         /* Make the discovered switches available to the userspace */
1498         device_for_each_child(&tb->root_switch->dev, NULL,
1499                               tb_scan_finalize_switch);
1500
1501         /* Allow tb_handle_hotplug to process events */
1502         tcm->hotplug_active = true;
1503         return 0;
1504 }
1505
1506 static int tb_suspend_noirq(struct tb *tb)
1507 {
1508         struct tb_cm *tcm = tb_priv(tb);
1509
1510         tb_dbg(tb, "suspending...\n");
1511         tb_disconnect_and_release_dp(tb);
1512         tb_switch_suspend(tb->root_switch, false);
1513         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1514         tb_dbg(tb, "suspend finished\n");
1515
1516         return 0;
1517 }
1518
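/*
 * Re-enable CLx, TMU, lane bonding and link configuration for the given
 * router and, recursively, for every router and XDomain connection below
 * it. Used when coming back from system or runtime suspend.
 */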
1519 static void tb_restore_children(struct tb_switch *sw)
1520 {
1521         struct tb_port *port;
1522         int ret;
1523
1524         /* No need to restore if the router is already unplugged */
1525         if (sw->is_unplugged)
1526                 return;
1527
1528         /*
1529          * CL0s and CL1 are enabled and supported together.
1530          * Silently ignore CLx re-enabling in case CLx is not supported.
1531          */
1532         ret = tb_switch_enable_clx(sw, TB_CL1);
1533         if (ret && ret != -EOPNOTSUPP)
1534                 tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
1535                            tb_switch_clx_name(TB_CL1));
1536
1537         if (tb_switch_is_clx_enabled(sw, TB_CL1))
1538                 /*
1539                  * To support the highest CLx state, we set the router's TMU
1540                  * to Normal-Uni mode.
1541                  */
1542                 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
1543         else
1544                 /* If CLx is disabled, configure the TMU to HiFi-Bidir mode */
1545                 tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
1546
1547         if (tb_enable_tmu(sw))
1548                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
1549
1550         tb_switch_for_each_port(sw, port) {
1551                 if (!tb_port_has_remote(port) && !port->xdomain)
1552                         continue;
1553
1554                 if (port->remote) {
1555                         tb_switch_lane_bonding_enable(port->remote->sw);
1556                         tb_switch_configure_link(port->remote->sw);
1557
1558                         tb_restore_children(port->remote->sw);
1559                 } else if (port->xdomain) {
1560                         tb_port_configure_xdomain(port, port->xdomain);
1561                 }
1562         }
1563 }
1564
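/*
 * System resume: reset the host router to drop anything the boot
 * firmware may have set up, tear down any foreign tunnels found on the
 * way back (relevant after hibernation) and then restart the tunnels
 * that were active before suspend.
 */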
1565 static int tb_resume_noirq(struct tb *tb)
1566 {
1567         struct tb_cm *tcm = tb_priv(tb);
1568         struct tb_tunnel *tunnel, *n;
1569         unsigned int usb3_delay = 0;
1570         LIST_HEAD(tunnels);
1571
1572         tb_dbg(tb, "resuming...\n");
1573
1574         /* Remove any PCIe devices the firmware might have set up */
1575         tb_switch_reset(tb->root_switch);
1576
1577         tb_switch_resume(tb->root_switch);
1578         tb_free_invalid_tunnels(tb);
1579         tb_free_unplugged_children(tb->root_switch);
1580         tb_restore_children(tb->root_switch);
1581
1582         /*
1583          * If we get here from suspend to disk, the boot firmware or the
1584          * restore kernel might have created tunnels of its own. Since
1585          * we cannot be sure they are usable for us, find and tear
1586          * them down.
1587          */
1588         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
1589         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
1590                 if (tb_tunnel_is_usb3(tunnel))
1591                         usb3_delay = 500;
1592                 tb_tunnel_deactivate(tunnel);
1593                 tb_tunnel_free(tunnel);
1594         }
1595
1596         /* Re-create our tunnels now */
1597         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1598                 /* USB3 requires a delay before it can be re-activated */
1599                 if (tb_tunnel_is_usb3(tunnel)) {
1600                         msleep(usb3_delay);
1601                         /* Only need to do it once */
1602                         usb3_delay = 0;
1603                 }
1604                 tb_tunnel_restart(tunnel);
1605         }
1606         if (!list_empty(&tcm->tunnel_list)) {
1607                 /*
1608                  * The PCIe links need some time to come back up after the
1609                  * tunnels are restarted; empirically 100 ms is enough.
1610                  */
1611                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
1612                 msleep(100);
1613         }
1614         /* Allow tb_handle_hotplug to progress events */
1615         tcm->hotplug_active = true;
1616         tb_dbg(tb, "resume finished\n");
1617
1618         return 0;
1619 }
1620
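/*
 * Remove XDomain connections that went away while the domain was
 * suspended. Returns the number of XDomains removed so the caller can
 * decide whether another rescan is needed.
 */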
1621 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
1622 {
1623         struct tb_port *port;
1624         int ret = 0;
1625
1626         tb_switch_for_each_port(sw, port) {
1627                 if (tb_is_upstream_port(port))
1628                         continue;
1629                 if (port->xdomain && port->xdomain->is_unplugged) {
1630                         tb_retimer_remove_all(port);
1631                         tb_xdomain_remove(port->xdomain);
1632                         tb_port_unconfigure_xdomain(port);
1633                         port->xdomain = NULL;
1634                         ret++;
1635                 } else if (port->remote) {
1636                         ret += tb_free_unplugged_xdomains(port->remote->sw);
1637                 }
1638         }
1639
1640         return ret;
1641 }
1642
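/*
 * Freeze/thaw are used while the hibernation image is created: the
 * hardware is left untouched, only hotplug event processing is paused
 * and then resumed.
 */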
1643 static int tb_freeze_noirq(struct tb *tb)
1644 {
1645         struct tb_cm *tcm = tb_priv(tb);
1646
1647         tcm->hotplug_active = false;
1648         return 0;
1649 }
1650
1651 static int tb_thaw_noirq(struct tb *tb)
1652 {
1653         struct tb_cm *tcm = tb_priv(tb);
1654
1655         tcm->hotplug_active = true;
1656         return 0;
1657 }
1658
1659 static void tb_complete(struct tb *tb)
1660 {
1661         /*
1662          * Release any unplugged XDomains and, in case another domain
1663          * has been swapped in place of an unplugged XDomain, run
1664          * another rescan to pick it up.
1665          */
1666         mutex_lock(&tb->lock);
1667         if (tb_free_unplugged_xdomains(tb->root_switch))
1668                 tb_scan_switch(tb->root_switch);
1669         mutex_unlock(&tb->lock);
1670 }
1671
1672 static int tb_runtime_suspend(struct tb *tb)
1673 {
1674         struct tb_cm *tcm = tb_priv(tb);
1675
1676         mutex_lock(&tb->lock);
1677         tb_switch_suspend(tb->root_switch, true);
1678         tcm->hotplug_active = false;
1679         mutex_unlock(&tb->lock);
1680
1681         return 0;
1682 }
1683
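/*
 * Delayed work scheduled from tb_runtime_resume() to release routers and
 * XDomains that were unplugged while the domain was runtime suspended.
 */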
1684 static void tb_remove_work(struct work_struct *work)
1685 {
1686         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
1687         struct tb *tb = tcm_to_tb(tcm);
1688
1689         mutex_lock(&tb->lock);
1690         if (tb->root_switch) {
1691                 tb_free_unplugged_children(tb->root_switch);
1692                 tb_free_unplugged_xdomains(tb->root_switch);
1693         }
1694         mutex_unlock(&tb->lock);
1695 }
1696
1697 static int tb_runtime_resume(struct tb *tb)
1698 {
1699         struct tb_cm *tcm = tb_priv(tb);
1700         struct tb_tunnel *tunnel, *n;
1701
1702         mutex_lock(&tb->lock);
1703         tb_switch_resume(tb->root_switch);
1704         tb_free_invalid_tunnels(tb);
1705         tb_restore_children(tb->root_switch);
1706         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
1707                 tb_tunnel_restart(tunnel);
1708         tcm->hotplug_active = true;
1709         mutex_unlock(&tb->lock);
1710
1711         /*
1712          * Schedule cleanup of any unplugged devices. Run this in a
1713          * separate thread to avoid possible deadlock if the device
1714          * removal runtime resumes the unplugged device.
1715          */
1716         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
1717         return 0;
1718 }
1719
1720 static const struct tb_cm_ops tb_cm_ops = {
1721         .start = tb_start,
1722         .stop = tb_stop,
1723         .suspend_noirq = tb_suspend_noirq,
1724         .resume_noirq = tb_resume_noirq,
1725         .freeze_noirq = tb_freeze_noirq,
1726         .thaw_noirq = tb_thaw_noirq,
1727         .complete = tb_complete,
1728         .runtime_suspend = tb_runtime_suspend,
1729         .runtime_resume = tb_runtime_resume,
1730         .handle_event = tb_handle_event,
1731         .disapprove_switch = tb_disconnect_pci,
1732         .approve_switch = tb_tunnel_pci,
1733         .approve_xdomain_paths = tb_approve_xdomain_paths,
1734         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
1735 };
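
/*
 * Callbacks the software connection manager does not implement (for
 * example the ICM specific driver_ready or set_boot_acl) are left NULL
 * here; the domain core is assumed to check for NULL before dispatching.
 */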
1736
1737 /*
1738  * During suspend the Thunderbolt controller is reset and all PCIe
1739  * tunnels are lost. The NHI driver will try to reestablish all tunnels
1740  * during resume. This adds device links between the tunneled PCIe
1741  * downstream ports and the NHI so that the device core makes sure the
1742  * NHI is resumed before the rest of the devices.
1743  */
1744 static void tb_apple_add_links(struct tb_nhi *nhi)
1745 {
1746         struct pci_dev *upstream, *pdev;
1747
1748         if (!x86_apple_machine)
1749                 return;
1750
1751         switch (nhi->pdev->device) {
1752         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1753         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1754         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1755         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1756                 break;
1757         default:
1758                 return;
1759         }
1760
1761         upstream = pci_upstream_bridge(nhi->pdev);
1762         while (upstream) {
1763                 if (!pci_is_pcie(upstream))
1764                         return;
1765                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
1766                         break;
1767                 upstream = pci_upstream_bridge(upstream);
1768         }
1769
1770         if (!upstream)
1771                 return;
1772
1773         /*
1774          * For each hotplug downstream port, add a device link back
1775          * to the NHI so that PCIe tunnels can be re-established after
1776          * sleep.
1777          */
1778         for_each_pci_bridge(pdev, upstream->subordinate) {
1779                 const struct device_link *link;
1780
1781                 if (!pci_is_pcie(pdev))
1782                         continue;
1783                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
1784                     !pdev->is_hotplug_bridge)
1785                         continue;
1786
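                /*
                 * The downstream port is the consumer and the NHI the
                 * supplier: DL_FLAG_PM_RUNTIME makes sure the NHI is
                 * runtime resumed before the port and
                 * DL_FLAG_AUTOREMOVE_SUPPLIER drops the link automatically
                 * when the NHI driver unbinds.
                 */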
1787                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
1788                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
1789                                        DL_FLAG_PM_RUNTIME);
1790                 if (link) {
1791                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
1792                                 dev_name(&pdev->dev));
1793                 } else {
1794                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
1795                                  dev_name(&pdev->dev));
1796                 }
1797         }
1798 }
1799
1800 struct tb *tb_probe(struct tb_nhi *nhi)
1801 {
1802         struct tb_cm *tcm;
1803         struct tb *tb;
1804
1805         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
1806         if (!tb)
1807                 return NULL;
1808
1809         if (tb_acpi_may_tunnel_pcie())
1810                 tb->security_level = TB_SECURITY_USER;
1811         else
1812                 tb->security_level = TB_SECURITY_NOPCIE;
1813
1814         tb->cm_ops = &tb_cm_ops;
1815
1816         tcm = tb_priv(tb);
1817         INIT_LIST_HEAD(&tcm->tunnel_list);
1818         INIT_LIST_HEAD(&tcm->dp_resources);
1819         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
1820
1821         tb_dbg(tb, "using software connection manager\n");
1822
1823         tb_apple_add_links(nhi);
1824         tb_acpi_add_links(nhi);
1825
1826         return tb;
1827 }
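
/*
 * Illustrative sketch, an assumption about nhi.c rather than part of this
 * file: depending on the kernel configuration the NHI driver is expected
 * to try the firmware connection manager first and fall back to this
 * software one, roughly
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 *
 * tb_probe() therefore only allocates and pre-initializes the domain; the
 * actual startup happens later through tb_cm_ops.start (tb_start() above).
 */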