GNU Linux-libre 5.19-rc6-gnu
[releases.git] / drivers / thunderbolt / tb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - bus logic (NHI independent)
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/platform_data/x86/apple.h>
14
15 #include "tb.h"
16 #include "tb_regs.h"
17 #include "tunnel.h"
18
19 #define TB_TIMEOUT      100 /* ms */
20
21 /**
22  * struct tb_cm - Simple Thunderbolt connection manager
23  * @tunnel_list: List of active tunnels
24  * @dp_resources: List of available DP resources for DP tunneling
25  * @hotplug_active: tb_handle_hotplug will stop progressing plug
26  *                  events and exit if this is not set (it needs to
27  *                  acquire the lock one more time). Used to drain wq
28  *                  after cfg has been paused.
29  * @remove_work: Work used to remove any unplugged routers after
30  *               runtime resume
31  */
32 struct tb_cm {
33         struct list_head tunnel_list;
34         struct list_head dp_resources;
35         bool hotplug_active;
36         struct delayed_work remove_work;
37 };
38
39 static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
40 {
41         return ((void *)tcm - sizeof(struct tb));
42 }
43
44 struct tb_hotplug_event {
45         struct work_struct work;
46         struct tb *tb;
47         u64 route;
48         u8 port;
49         bool unplug;
50 };
51
52 static void tb_handle_hotplug(struct work_struct *work);
53
54 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
55 {
56         struct tb_hotplug_event *ev;
57
58         ev = kmalloc(sizeof(*ev), GFP_KERNEL);
59         if (!ev)
60                 return;
61
62         ev->tb = tb;
63         ev->route = route;
64         ev->port = port;
65         ev->unplug = unplug;
66         INIT_WORK(&ev->work, tb_handle_hotplug);
67         queue_work(tb->wq, &ev->work);
68 }
69
70 /* enumeration & hot plug handling */
71
72 static void tb_add_dp_resources(struct tb_switch *sw)
73 {
74         struct tb_cm *tcm = tb_priv(sw->tb);
75         struct tb_port *port;
76
77         tb_switch_for_each_port(sw, port) {
78                 if (!tb_port_is_dpin(port))
79                         continue;
80
81                 if (!tb_switch_query_dp_resource(sw, port))
82                         continue;
83
84                 list_add_tail(&port->list, &tcm->dp_resources);
85                 tb_port_dbg(port, "DP IN resource available\n");
86         }
87 }
88
89 static void tb_remove_dp_resources(struct tb_switch *sw)
90 {
91         struct tb_cm *tcm = tb_priv(sw->tb);
92         struct tb_port *port, *tmp;
93
94         /* Clear children resources first */
95         tb_switch_for_each_port(sw, port) {
96                 if (tb_port_has_remote(port))
97                         tb_remove_dp_resources(port->remote->sw);
98         }
99
100         list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
101                 if (port->sw == sw) {
102                         tb_port_dbg(port, "DP OUT resource unavailable\n");
103                         list_del_init(&port->list);
104                 }
105         }
106 }
107
108 static void tb_switch_discover_tunnels(struct tb_switch *sw,
109                                        struct list_head *list,
110                                        bool alloc_hopids)
111 {
112         struct tb *tb = sw->tb;
113         struct tb_port *port;
114
115         tb_switch_for_each_port(sw, port) {
116                 struct tb_tunnel *tunnel = NULL;
117
118                 switch (port->config.type) {
119                 case TB_TYPE_DP_HDMI_IN:
120                         tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
121                         break;
122
123                 case TB_TYPE_PCIE_DOWN:
124                         tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
125                         break;
126
127                 case TB_TYPE_USB3_DOWN:
128                         tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
129                         break;
130
131                 default:
132                         break;
133                 }
134
135                 if (tunnel)
136                         list_add_tail(&tunnel->list, list);
137         }
138
139         tb_switch_for_each_port(sw, port) {
140                 if (tb_port_has_remote(port)) {
141                         tb_switch_discover_tunnels(port->remote->sw, list,
142                                                    alloc_hopids);
143                 }
144         }
145 }
146
147 static void tb_discover_tunnels(struct tb *tb)
148 {
149         struct tb_cm *tcm = tb_priv(tb);
150         struct tb_tunnel *tunnel;
151
152         tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
153
154         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
155                 if (tb_tunnel_is_pci(tunnel)) {
156                         struct tb_switch *parent = tunnel->dst_port->sw;
157
158                         while (parent != tunnel->src_port->sw) {
159                                 parent->boot = true;
160                                 parent = tb_switch_parent(parent);
161                         }
162                 } else if (tb_tunnel_is_dp(tunnel)) {
163                         /* Keep the domain from powering down */
164                         pm_runtime_get_sync(&tunnel->src_port->sw->dev);
165                         pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
166                 }
167         }
168 }
169
170 static int tb_port_configure_xdomain(struct tb_port *port)
171 {
172         if (tb_switch_is_usb4(port->sw))
173                 return usb4_port_configure_xdomain(port);
174         return tb_lc_configure_xdomain(port);
175 }
176
177 static void tb_port_unconfigure_xdomain(struct tb_port *port)
178 {
179         if (tb_switch_is_usb4(port->sw))
180                 usb4_port_unconfigure_xdomain(port);
181         else
182                 tb_lc_unconfigure_xdomain(port);
183
184         tb_port_enable(port->dual_link_port);
185 }
186
187 static void tb_scan_xdomain(struct tb_port *port)
188 {
189         struct tb_switch *sw = port->sw;
190         struct tb *tb = sw->tb;
191         struct tb_xdomain *xd;
192         u64 route;
193
194         if (!tb_is_xdomain_enabled())
195                 return;
196
197         route = tb_downstream_route(port);
198         xd = tb_xdomain_find_by_route(tb, route);
199         if (xd) {
200                 tb_xdomain_put(xd);
201                 return;
202         }
203
204         xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
205                               NULL);
206         if (xd) {
207                 tb_port_at(route, sw)->xdomain = xd;
208                 tb_port_configure_xdomain(port);
209                 tb_xdomain_add(xd);
210         }
211 }
212
213 static int tb_enable_tmu(struct tb_switch *sw)
214 {
215         int ret;
216
217         /* If it is already enabled in correct mode, don't touch it */
218         if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
219                 return 0;
220
221         ret = tb_switch_tmu_disable(sw);
222         if (ret)
223                 return ret;
224
225         ret = tb_switch_tmu_post_time(sw);
226         if (ret)
227                 return ret;
228
229         return tb_switch_tmu_enable(sw);
230 }
231
232 /**
233  * tb_find_unused_port() - return the first inactive port on @sw
234  * @sw: Switch to find the port on
235  * @type: Port type to look for
236  */
237 static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
238                                            enum tb_port_type type)
239 {
240         struct tb_port *port;
241
242         tb_switch_for_each_port(sw, port) {
243                 if (tb_is_upstream_port(port))
244                         continue;
245                 if (port->config.type != type)
246                         continue;
247                 if (!port->cap_adap)
248                         continue;
249                 if (tb_port_is_enabled(port))
250                         continue;
251                 return port;
252         }
253         return NULL;
254 }
255
256 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
257                                          const struct tb_port *port)
258 {
259         struct tb_port *down;
260
261         down = usb4_switch_map_usb3_down(sw, port);
262         if (down && !tb_usb3_port_is_enabled(down))
263                 return down;
264         return NULL;
265 }
266
267 static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
268                                         struct tb_port *src_port,
269                                         struct tb_port *dst_port)
270 {
271         struct tb_cm *tcm = tb_priv(tb);
272         struct tb_tunnel *tunnel;
273
274         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
275                 if (tunnel->type == type &&
276                     ((src_port && src_port == tunnel->src_port) ||
277                      (dst_port && dst_port == tunnel->dst_port))) {
278                         return tunnel;
279                 }
280         }
281
282         return NULL;
283 }
284
285 static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
286                                                    struct tb_port *src_port,
287                                                    struct tb_port *dst_port)
288 {
289         struct tb_port *port, *usb3_down;
290         struct tb_switch *sw;
291
292         /* Pick the router that is deepest in the topology */
293         if (dst_port->sw->config.depth > src_port->sw->config.depth)
294                 sw = dst_port->sw;
295         else
296                 sw = src_port->sw;
297
298         /* Can't be the host router */
299         if (sw == tb->root_switch)
300                 return NULL;
301
302         /* Find the downstream USB4 port that leads to this router */
303         port = tb_port_at(tb_route(sw), tb->root_switch);
304         /* Find the corresponding host router USB3 downstream port */
305         usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
306         if (!usb3_down)
307                 return NULL;
308
309         return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
310 }
311
312 static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
313         struct tb_port *dst_port, int *available_up, int *available_down)
314 {
315         int usb3_consumed_up, usb3_consumed_down, ret;
316         struct tb_cm *tcm = tb_priv(tb);
317         struct tb_tunnel *tunnel;
318         struct tb_port *port;
319
320         tb_port_dbg(dst_port, "calculating available bandwidth\n");
321
322         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
323         if (tunnel) {
324                 ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
325                                                    &usb3_consumed_down);
326                 if (ret)
327                         return ret;
328         } else {
329                 usb3_consumed_up = 0;
330                 usb3_consumed_down = 0;
331         }
332
333         *available_up = *available_down = 40000;
334
335         /* Find the minimum available bandwidth over all links */
336         tb_for_each_port_on_path(src_port, dst_port, port) {
337                 int link_speed, link_width, up_bw, down_bw;
338
339                 if (!tb_port_is_null(port))
340                         continue;
341
342                 if (tb_is_upstream_port(port)) {
343                         link_speed = port->sw->link_speed;
344                 } else {
345                         link_speed = tb_port_get_link_speed(port);
346                         if (link_speed < 0)
347                                 return link_speed;
348                 }
349
350                 link_width = port->bonded ? 2 : 1;
351
352                 up_bw = link_speed * link_width * 1000; /* Mb/s */
353                 /* Leave 10% guard band */
354                 up_bw -= up_bw / 10;
355                 down_bw = up_bw;
356
357                 tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
358
359                 /*
360                  * Find all DP tunnels that cross the port and reduce
361                  * their consumed bandwidth from the available.
362                  */
363                 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
364                         int dp_consumed_up, dp_consumed_down;
365
366                         if (!tb_tunnel_is_dp(tunnel))
367                                 continue;
368
369                         if (!tb_tunnel_port_on_path(tunnel, port))
370                                 continue;
371
372                         ret = tb_tunnel_consumed_bandwidth(tunnel,
373                                                            &dp_consumed_up,
374                                                            &dp_consumed_down);
375                         if (ret)
376                                 return ret;
377
378                         up_bw -= dp_consumed_up;
379                         down_bw -= dp_consumed_down;
380                 }
381
382                 /*
383                  * If USB3 is tunneled from the host router down to the
384                  * branch leading to port we need to take USB3 consumed
385                  * bandwidth into account regardless whether it actually
386                  * crosses the port.
387                  */
388                 up_bw -= usb3_consumed_up;
389                 down_bw -= usb3_consumed_down;
390
391                 if (up_bw < *available_up)
392                         *available_up = up_bw;
393                 if (down_bw < *available_down)
394                         *available_down = down_bw;
395         }
396
397         if (*available_up < 0)
398                 *available_up = 0;
399         if (*available_down < 0)
400                 *available_down = 0;
401
402         return 0;
403 }
404
405 static int tb_release_unused_usb3_bandwidth(struct tb *tb,
406                                             struct tb_port *src_port,
407                                             struct tb_port *dst_port)
408 {
409         struct tb_tunnel *tunnel;
410
411         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
412         return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
413 }
414
415 static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
416                                       struct tb_port *dst_port)
417 {
418         int ret, available_up, available_down;
419         struct tb_tunnel *tunnel;
420
421         tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
422         if (!tunnel)
423                 return;
424
425         tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
426
427         /*
428          * Calculate available bandwidth for the first hop USB3 tunnel.
429          * That determines the whole USB3 bandwidth for this branch.
430          */
431         ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
432                                      &available_up, &available_down);
433         if (ret) {
434                 tb_warn(tb, "failed to calculate available bandwidth\n");
435                 return;
436         }
437
438         tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
439                available_up, available_down);
440
441         tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
442 }
443
444 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
445 {
446         struct tb_switch *parent = tb_switch_parent(sw);
447         int ret, available_up, available_down;
448         struct tb_port *up, *down, *port;
449         struct tb_cm *tcm = tb_priv(tb);
450         struct tb_tunnel *tunnel;
451
452         if (!tb_acpi_may_tunnel_usb3()) {
453                 tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
454                 return 0;
455         }
456
457         up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
458         if (!up)
459                 return 0;
460
461         if (!sw->link_usb4)
462                 return 0;
463
464         /*
465          * Look up available down port. Since we are chaining it should
466          * be found right above this switch.
467          */
468         port = tb_port_at(tb_route(sw), parent);
469         down = tb_find_usb3_down(parent, port);
470         if (!down)
471                 return 0;
472
473         if (tb_route(parent)) {
474                 struct tb_port *parent_up;
475                 /*
476                  * Check first that the parent switch has its upstream USB3
477                  * port enabled. Otherwise the chain is not complete and
478                  * there is no point setting up a new tunnel.
479                  */
480                 parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
481                 if (!parent_up || !tb_port_is_enabled(parent_up))
482                         return 0;
483
484                 /* Make all unused bandwidth available for the new tunnel */
485                 ret = tb_release_unused_usb3_bandwidth(tb, down, up);
486                 if (ret)
487                         return ret;
488         }
489
490         ret = tb_available_bandwidth(tb, down, up, &available_up,
491                                      &available_down);
492         if (ret)
493                 goto err_reclaim;
494
495         tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
496                     available_up, available_down);
497
498         tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
499                                       available_down);
500         if (!tunnel) {
501                 ret = -ENOMEM;
502                 goto err_reclaim;
503         }
504
505         if (tb_tunnel_activate(tunnel)) {
506                 tb_port_info(up,
507                              "USB3 tunnel activation failed, aborting\n");
508                 ret = -EIO;
509                 goto err_free;
510         }
511
512         list_add_tail(&tunnel->list, &tcm->tunnel_list);
513         if (tb_route(parent))
514                 tb_reclaim_usb3_bandwidth(tb, down, up);
515
516         return 0;
517
518 err_free:
519         tb_tunnel_free(tunnel);
520 err_reclaim:
521         if (tb_route(parent))
522                 tb_reclaim_usb3_bandwidth(tb, down, up);
523
524         return ret;
525 }
526
527 static int tb_create_usb3_tunnels(struct tb_switch *sw)
528 {
529         struct tb_port *port;
530         int ret;
531
532         if (!tb_acpi_may_tunnel_usb3())
533                 return 0;
534
535         if (tb_route(sw)) {
536                 ret = tb_tunnel_usb3(sw->tb, sw);
537                 if (ret)
538                         return ret;
539         }
540
541         tb_switch_for_each_port(sw, port) {
542                 if (!tb_port_has_remote(port))
543                         continue;
544                 ret = tb_create_usb3_tunnels(port->remote->sw);
545                 if (ret)
546                         return ret;
547         }
548
549         return 0;
550 }
551
552 static void tb_scan_port(struct tb_port *port);
553
554 /*
555  * tb_scan_switch() - scan for and initialize downstream switches
556  */
557 static void tb_scan_switch(struct tb_switch *sw)
558 {
559         struct tb_port *port;
560
561         pm_runtime_get_sync(&sw->dev);
562
563         tb_switch_for_each_port(sw, port)
564                 tb_scan_port(port);
565
566         pm_runtime_mark_last_busy(&sw->dev);
567         pm_runtime_put_autosuspend(&sw->dev);
568 }
569
570 /*
571  * tb_scan_port() - check for and initialize switches below port
572  */
573 static void tb_scan_port(struct tb_port *port)
574 {
575         struct tb_cm *tcm = tb_priv(port->sw->tb);
576         struct tb_port *upstream_port;
577         struct tb_switch *sw;
578
579         if (tb_is_upstream_port(port))
580                 return;
581
582         if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
583             !tb_dp_port_is_enabled(port)) {
584                 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
585                 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
586                                  false);
587                 return;
588         }
589
590         if (port->config.type != TB_TYPE_PORT)
591                 return;
592         if (port->dual_link_port && port->link_nr)
593                 return; /*
594                          * Downstream switch is reachable through two ports.
595                          * Only scan on the primary port (link_nr == 0).
596                          */
597         if (tb_wait_for_port(port, false) <= 0)
598                 return;
599         if (port->remote) {
600                 tb_port_dbg(port, "port already has a remote\n");
601                 return;
602         }
603
604         tb_retimer_scan(port, true);
605
606         sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
607                              tb_downstream_route(port));
608         if (IS_ERR(sw)) {
609                 /*
610                  * If there is an error accessing the connected switch
611                  * it may be connected to another domain. Also we allow
612                  * the other domain to be connected to a max depth switch.
613                  */
614                 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
615                         tb_scan_xdomain(port);
616                 return;
617         }
618
619         if (tb_switch_configure(sw)) {
620                 tb_switch_put(sw);
621                 return;
622         }
623
624         /*
625          * If there was previously another domain connected remove it
626          * first.
627          */
628         if (port->xdomain) {
629                 tb_xdomain_remove(port->xdomain);
630                 tb_port_unconfigure_xdomain(port);
631                 port->xdomain = NULL;
632         }
633
634         /*
635          * Do not send uevents until we have discovered all existing
636          * tunnels and know which switches were authorized already by
637          * the boot firmware.
638          */
639         if (!tcm->hotplug_active)
640                 dev_set_uevent_suppress(&sw->dev, true);
641
642         /*
643          * At the moment Thunderbolt 2 and beyond (devices with LC) we
644          * can support runtime PM.
645          */
646         sw->rpm = sw->generation > 1;
647
648         if (tb_switch_add(sw)) {
649                 tb_switch_put(sw);
650                 return;
651         }
652
653         /* Link the switches using both links if available */
654         upstream_port = tb_upstream_port(sw);
655         port->remote = upstream_port;
656         upstream_port->remote = port;
657         if (port->dual_link_port && upstream_port->dual_link_port) {
658                 port->dual_link_port->remote = upstream_port->dual_link_port;
659                 upstream_port->dual_link_port->remote = port->dual_link_port;
660         }
661
662         /* Enable lane bonding if supported */
663         tb_switch_lane_bonding_enable(sw);
664         /* Set the link configured */
665         tb_switch_configure_link(sw);
666         if (tb_switch_enable_clx(sw, TB_CL0S))
667                 tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
668
669         tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
670                                 tb_switch_is_clx_enabled(sw));
671
672         if (tb_enable_tmu(sw))
673                 tb_sw_warn(sw, "failed to enable TMU\n");
674
675         /* Scan upstream retimers */
676         tb_retimer_scan(upstream_port, true);
677
678         /*
679          * Create USB 3.x tunnels only when the switch is plugged to the
680          * domain. This is because we scan the domain also during discovery
681          * and want to discover existing USB 3.x tunnels before we create
682          * any new.
683          */
684         if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
685                 tb_sw_warn(sw, "USB3 tunnel creation failed\n");
686
687         tb_add_dp_resources(sw);
688         tb_scan_switch(sw);
689 }
690
691 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
692 {
693         struct tb_port *src_port, *dst_port;
694         struct tb *tb;
695
696         if (!tunnel)
697                 return;
698
699         tb_tunnel_deactivate(tunnel);
700         list_del(&tunnel->list);
701
702         tb = tunnel->tb;
703         src_port = tunnel->src_port;
704         dst_port = tunnel->dst_port;
705
706         switch (tunnel->type) {
707         case TB_TUNNEL_DP:
708                 /*
709                  * In case of DP tunnel make sure the DP IN resource is
710                  * deallocated properly.
711                  */
712                 tb_switch_dealloc_dp_resource(src_port->sw, src_port);
713                 /* Now we can allow the domain to runtime suspend again */
714                 pm_runtime_mark_last_busy(&dst_port->sw->dev);
715                 pm_runtime_put_autosuspend(&dst_port->sw->dev);
716                 pm_runtime_mark_last_busy(&src_port->sw->dev);
717                 pm_runtime_put_autosuspend(&src_port->sw->dev);
718                 fallthrough;
719
720         case TB_TUNNEL_USB3:
721                 tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
722                 break;
723
724         default:
725                 /*
726                  * PCIe and DMA tunnels do not consume guaranteed
727                  * bandwidth.
728                  */
729                 break;
730         }
731
732         tb_tunnel_free(tunnel);
733 }
734
735 /*
736  * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
737  */
738 static void tb_free_invalid_tunnels(struct tb *tb)
739 {
740         struct tb_cm *tcm = tb_priv(tb);
741         struct tb_tunnel *tunnel;
742         struct tb_tunnel *n;
743
744         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
745                 if (tb_tunnel_is_invalid(tunnel))
746                         tb_deactivate_and_free_tunnel(tunnel);
747         }
748 }
749
750 /*
751  * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
752  */
753 static void tb_free_unplugged_children(struct tb_switch *sw)
754 {
755         struct tb_port *port;
756
757         tb_switch_for_each_port(sw, port) {
758                 if (!tb_port_has_remote(port))
759                         continue;
760
761                 if (port->remote->sw->is_unplugged) {
762                         tb_retimer_remove_all(port);
763                         tb_remove_dp_resources(port->remote->sw);
764                         tb_switch_unconfigure_link(port->remote->sw);
765                         tb_switch_lane_bonding_disable(port->remote->sw);
766                         tb_switch_remove(port->remote->sw);
767                         port->remote = NULL;
768                         if (port->dual_link_port)
769                                 port->dual_link_port->remote = NULL;
770                 } else {
771                         tb_free_unplugged_children(port->remote->sw);
772                 }
773         }
774 }
775
776 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
777                                          const struct tb_port *port)
778 {
779         struct tb_port *down = NULL;
780
781         /*
782          * To keep plugging devices consistently in the same PCIe
783          * hierarchy, do mapping here for switch downstream PCIe ports.
784          */
785         if (tb_switch_is_usb4(sw)) {
786                 down = usb4_switch_map_pcie_down(sw, port);
787         } else if (!tb_route(sw)) {
788                 int phy_port = tb_phy_port_from_link(port->port);
789                 int index;
790
791                 /*
792                  * Hard-coded Thunderbolt port to PCIe down port mapping
793                  * per controller.
794                  */
795                 if (tb_switch_is_cactus_ridge(sw) ||
796                     tb_switch_is_alpine_ridge(sw))
797                         index = !phy_port ? 6 : 7;
798                 else if (tb_switch_is_falcon_ridge(sw))
799                         index = !phy_port ? 6 : 8;
800                 else if (tb_switch_is_titan_ridge(sw))
801                         index = !phy_port ? 8 : 9;
802                 else
803                         goto out;
804
805                 /* Validate the hard-coding */
806                 if (WARN_ON(index > sw->config.max_port_number))
807                         goto out;
808
809                 down = &sw->ports[index];
810         }
811
812         if (down) {
813                 if (WARN_ON(!tb_port_is_pcie_down(down)))
814                         goto out;
815                 if (tb_pci_port_is_enabled(down))
816                         goto out;
817
818                 return down;
819         }
820
821 out:
822         return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
823 }
824
825 static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
826 {
827         struct tb_port *host_port, *port;
828         struct tb_cm *tcm = tb_priv(tb);
829
830         host_port = tb_route(in->sw) ?
831                 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
832
833         list_for_each_entry(port, &tcm->dp_resources, list) {
834                 if (!tb_port_is_dpout(port))
835                         continue;
836
837                 if (tb_port_is_enabled(port)) {
838                         tb_port_dbg(port, "in use\n");
839                         continue;
840                 }
841
842                 tb_port_dbg(port, "DP OUT available\n");
843
844                 /*
845                  * Keep the DP tunnel under the topology starting from
846                  * the same host router downstream port.
847                  */
848                 if (host_port && tb_route(port->sw)) {
849                         struct tb_port *p;
850
851                         p = tb_port_at(tb_route(port->sw), tb->root_switch);
852                         if (p != host_port)
853                                 continue;
854                 }
855
856                 return port;
857         }
858
859         return NULL;
860 }
861
862 static void tb_tunnel_dp(struct tb *tb)
863 {
864         int available_up, available_down, ret, link_nr;
865         struct tb_cm *tcm = tb_priv(tb);
866         struct tb_port *port, *in, *out;
867         struct tb_tunnel *tunnel;
868
869         if (!tb_acpi_may_tunnel_dp()) {
870                 tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
871                 return;
872         }
873
874         /*
875          * Find pair of inactive DP IN and DP OUT adapters and then
876          * establish a DP tunnel between them.
877          */
878         tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
879
880         in = NULL;
881         out = NULL;
882         list_for_each_entry(port, &tcm->dp_resources, list) {
883                 if (!tb_port_is_dpin(port))
884                         continue;
885
886                 if (tb_port_is_enabled(port)) {
887                         tb_port_dbg(port, "in use\n");
888                         continue;
889                 }
890
891                 tb_port_dbg(port, "DP IN available\n");
892
893                 out = tb_find_dp_out(tb, port);
894                 if (out) {
895                         in = port;
896                         break;
897                 }
898         }
899
900         if (!in) {
901                 tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
902                 return;
903         }
904         if (!out) {
905                 tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
906                 return;
907         }
908
909         /*
910          * This is only applicable to links that are not bonded (so
911          * when Thunderbolt 1 hardware is involved somewhere in the
912          * topology). For these try to share the DP bandwidth between
913          * the two lanes.
914          */
915         link_nr = 1;
916         list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
917                 if (tb_tunnel_is_dp(tunnel)) {
918                         link_nr = 0;
919                         break;
920                 }
921         }
922
923         /*
924          * DP stream needs the domain to be active so runtime resume
925          * both ends of the tunnel.
926          *
927          * This should bring the routers in the middle active as well
928          * and keeps the domain from runtime suspending while the DP
929          * tunnel is active.
930          */
931         pm_runtime_get_sync(&in->sw->dev);
932         pm_runtime_get_sync(&out->sw->dev);
933
934         if (tb_switch_alloc_dp_resource(in->sw, in)) {
935                 tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
936                 goto err_rpm_put;
937         }
938
939         /* Make all unused USB3 bandwidth available for the new DP tunnel */
940         ret = tb_release_unused_usb3_bandwidth(tb, in, out);
941         if (ret) {
942                 tb_warn(tb, "failed to release unused bandwidth\n");
943                 goto err_dealloc_dp;
944         }
945
946         ret = tb_available_bandwidth(tb, in, out, &available_up,
947                                      &available_down);
948         if (ret)
949                 goto err_reclaim;
950
951         tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
952                available_up, available_down);
953
954         tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
955                                     available_down);
956         if (!tunnel) {
957                 tb_port_dbg(out, "could not allocate DP tunnel\n");
958                 goto err_reclaim;
959         }
960
961         if (tb_tunnel_activate(tunnel)) {
962                 tb_port_info(out, "DP tunnel activation failed, aborting\n");
963                 goto err_free;
964         }
965
966         list_add_tail(&tunnel->list, &tcm->tunnel_list);
967         tb_reclaim_usb3_bandwidth(tb, in, out);
968         return;
969
970 err_free:
971         tb_tunnel_free(tunnel);
972 err_reclaim:
973         tb_reclaim_usb3_bandwidth(tb, in, out);
974 err_dealloc_dp:
975         tb_switch_dealloc_dp_resource(in->sw, in);
976 err_rpm_put:
977         pm_runtime_mark_last_busy(&out->sw->dev);
978         pm_runtime_put_autosuspend(&out->sw->dev);
979         pm_runtime_mark_last_busy(&in->sw->dev);
980         pm_runtime_put_autosuspend(&in->sw->dev);
981 }
982
983 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
984 {
985         struct tb_port *in, *out;
986         struct tb_tunnel *tunnel;
987
988         if (tb_port_is_dpin(port)) {
989                 tb_port_dbg(port, "DP IN resource unavailable\n");
990                 in = port;
991                 out = NULL;
992         } else {
993                 tb_port_dbg(port, "DP OUT resource unavailable\n");
994                 in = NULL;
995                 out = port;
996         }
997
998         tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
999         tb_deactivate_and_free_tunnel(tunnel);
1000         list_del_init(&port->list);
1001
1002         /*
1003          * See if there is another DP OUT port that can be used for
1004          * to create another tunnel.
1005          */
1006         tb_tunnel_dp(tb);
1007 }
1008
1009 static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
1010 {
1011         struct tb_cm *tcm = tb_priv(tb);
1012         struct tb_port *p;
1013
1014         if (tb_port_is_enabled(port))
1015                 return;
1016
1017         list_for_each_entry(p, &tcm->dp_resources, list) {
1018                 if (p == port)
1019                         return;
1020         }
1021
1022         tb_port_dbg(port, "DP %s resource available\n",
1023                     tb_port_is_dpin(port) ? "IN" : "OUT");
1024         list_add_tail(&port->list, &tcm->dp_resources);
1025
1026         /* Look for suitable DP IN <-> DP OUT pairs now */
1027         tb_tunnel_dp(tb);
1028 }
1029
1030 static void tb_disconnect_and_release_dp(struct tb *tb)
1031 {
1032         struct tb_cm *tcm = tb_priv(tb);
1033         struct tb_tunnel *tunnel, *n;
1034
1035         /*
1036          * Tear down all DP tunnels and release their resources. They
1037          * will be re-established after resume based on plug events.
1038          */
1039         list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
1040                 if (tb_tunnel_is_dp(tunnel))
1041                         tb_deactivate_and_free_tunnel(tunnel);
1042         }
1043
1044         while (!list_empty(&tcm->dp_resources)) {
1045                 struct tb_port *port;
1046
1047                 port = list_first_entry(&tcm->dp_resources,
1048                                         struct tb_port, list);
1049                 list_del_init(&port->list);
1050         }
1051 }
1052
1053 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
1054 {
1055         struct tb_tunnel *tunnel;
1056         struct tb_port *up;
1057
1058         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1059         if (WARN_ON(!up))
1060                 return -ENODEV;
1061
1062         tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
1063         if (WARN_ON(!tunnel))
1064                 return -ENODEV;
1065
1066         tb_switch_xhci_disconnect(sw);
1067
1068         tb_tunnel_deactivate(tunnel);
1069         list_del(&tunnel->list);
1070         tb_tunnel_free(tunnel);
1071         return 0;
1072 }
1073
1074 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
1075 {
1076         struct tb_port *up, *down, *port;
1077         struct tb_cm *tcm = tb_priv(tb);
1078         struct tb_switch *parent_sw;
1079         struct tb_tunnel *tunnel;
1080
1081         up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
1082         if (!up)
1083                 return 0;
1084
1085         /*
1086          * Look up available down port. Since we are chaining it should
1087          * be found right above this switch.
1088          */
1089         parent_sw = tb_to_switch(sw->dev.parent);
1090         port = tb_port_at(tb_route(sw), parent_sw);
1091         down = tb_find_pcie_down(parent_sw, port);
1092         if (!down)
1093                 return 0;
1094
1095         tunnel = tb_tunnel_alloc_pci(tb, up, down);
1096         if (!tunnel)
1097                 return -ENOMEM;
1098
1099         if (tb_tunnel_activate(tunnel)) {
1100                 tb_port_info(up,
1101                              "PCIe tunnel activation failed, aborting\n");
1102                 tb_tunnel_free(tunnel);
1103                 return -EIO;
1104         }
1105
1106         /*
1107          * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
1108          * here.
1109          */
1110         if (tb_switch_pcie_l1_enable(sw))
1111                 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
1112
1113         if (tb_switch_xhci_connect(sw))
1114                 tb_sw_warn(sw, "failed to connect xHCI\n");
1115
1116         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1117         return 0;
1118 }
1119
1120 static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1121                                     int transmit_path, int transmit_ring,
1122                                     int receive_path, int receive_ring)
1123 {
1124         struct tb_cm *tcm = tb_priv(tb);
1125         struct tb_port *nhi_port, *dst_port;
1126         struct tb_tunnel *tunnel;
1127         struct tb_switch *sw;
1128
1129         sw = tb_to_switch(xd->dev.parent);
1130         dst_port = tb_port_at(xd->route, sw);
1131         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1132
1133         mutex_lock(&tb->lock);
1134         tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
1135                                      transmit_ring, receive_path, receive_ring);
1136         if (!tunnel) {
1137                 mutex_unlock(&tb->lock);
1138                 return -ENOMEM;
1139         }
1140
1141         if (tb_tunnel_activate(tunnel)) {
1142                 tb_port_info(nhi_port,
1143                              "DMA tunnel activation failed, aborting\n");
1144                 tb_tunnel_free(tunnel);
1145                 mutex_unlock(&tb->lock);
1146                 return -EIO;
1147         }
1148
1149         list_add_tail(&tunnel->list, &tcm->tunnel_list);
1150         mutex_unlock(&tb->lock);
1151         return 0;
1152 }
1153
1154 static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1155                                           int transmit_path, int transmit_ring,
1156                                           int receive_path, int receive_ring)
1157 {
1158         struct tb_cm *tcm = tb_priv(tb);
1159         struct tb_port *nhi_port, *dst_port;
1160         struct tb_tunnel *tunnel, *n;
1161         struct tb_switch *sw;
1162
1163         sw = tb_to_switch(xd->dev.parent);
1164         dst_port = tb_port_at(xd->route, sw);
1165         nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
1166
1167         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1168                 if (!tb_tunnel_is_dma(tunnel))
1169                         continue;
1170                 if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
1171                         continue;
1172
1173                 if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
1174                                         receive_path, receive_ring))
1175                         tb_deactivate_and_free_tunnel(tunnel);
1176         }
1177 }
1178
1179 static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
1180                                        int transmit_path, int transmit_ring,
1181                                        int receive_path, int receive_ring)
1182 {
1183         if (!xd->is_unplugged) {
1184                 mutex_lock(&tb->lock);
1185                 __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
1186                                               transmit_ring, receive_path,
1187                                               receive_ring);
1188                 mutex_unlock(&tb->lock);
1189         }
1190         return 0;
1191 }
1192
1193 /* hotplug handling */
1194
1195 /*
1196  * tb_handle_hotplug() - handle hotplug event
1197  *
1198  * Executes on tb->wq.
1199  */
1200 static void tb_handle_hotplug(struct work_struct *work)
1201 {
1202         struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
1203         struct tb *tb = ev->tb;
1204         struct tb_cm *tcm = tb_priv(tb);
1205         struct tb_switch *sw;
1206         struct tb_port *port;
1207
1208         /* Bring the domain back from sleep if it was suspended */
1209         pm_runtime_get_sync(&tb->dev);
1210
1211         mutex_lock(&tb->lock);
1212         if (!tcm->hotplug_active)
1213                 goto out; /* during init, suspend or shutdown */
1214
1215         sw = tb_switch_find_by_route(tb, ev->route);
1216         if (!sw) {
1217                 tb_warn(tb,
1218                         "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
1219                         ev->route, ev->port, ev->unplug);
1220                 goto out;
1221         }
1222         if (ev->port > sw->config.max_port_number) {
1223                 tb_warn(tb,
1224                         "hotplug event from non existent port %llx:%x (unplug: %d)\n",
1225                         ev->route, ev->port, ev->unplug);
1226                 goto put_sw;
1227         }
1228         port = &sw->ports[ev->port];
1229         if (tb_is_upstream_port(port)) {
1230                 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
1231                        ev->route, ev->port, ev->unplug);
1232                 goto put_sw;
1233         }
1234
1235         pm_runtime_get_sync(&sw->dev);
1236
1237         if (ev->unplug) {
1238                 tb_retimer_remove_all(port);
1239
1240                 if (tb_port_has_remote(port)) {
1241                         tb_port_dbg(port, "switch unplugged\n");
1242                         tb_sw_set_unplugged(port->remote->sw);
1243                         tb_free_invalid_tunnels(tb);
1244                         tb_remove_dp_resources(port->remote->sw);
1245                         tb_switch_tmu_disable(port->remote->sw);
1246                         tb_switch_unconfigure_link(port->remote->sw);
1247                         tb_switch_lane_bonding_disable(port->remote->sw);
1248                         tb_switch_remove(port->remote->sw);
1249                         port->remote = NULL;
1250                         if (port->dual_link_port)
1251                                 port->dual_link_port->remote = NULL;
1252                         /* Maybe we can create another DP tunnel */
1253                         tb_tunnel_dp(tb);
1254                 } else if (port->xdomain) {
1255                         struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
1256
1257                         tb_port_dbg(port, "xdomain unplugged\n");
1258                         /*
1259                          * Service drivers are unbound during
1260                          * tb_xdomain_remove() so setting XDomain as
1261                          * unplugged here prevents deadlock if they call
1262                          * tb_xdomain_disable_paths(). We will tear down
1263                          * all the tunnels below.
1264                          */
1265                         xd->is_unplugged = true;
1266                         tb_xdomain_remove(xd);
1267                         port->xdomain = NULL;
1268                         __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
1269                         tb_xdomain_put(xd);
1270                         tb_port_unconfigure_xdomain(port);
1271                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1272                         tb_dp_resource_unavailable(tb, port);
1273                 } else if (!port->port) {
1274                         tb_sw_dbg(sw, "xHCI disconnect request\n");
1275                         tb_switch_xhci_disconnect(sw);
1276                 } else {
1277                         tb_port_dbg(port,
1278                                    "got unplug event for disconnected port, ignoring\n");
1279                 }
1280         } else if (port->remote) {
1281                 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
1282         } else if (!port->port && sw->authorized) {
1283                 tb_sw_dbg(sw, "xHCI connect request\n");
1284                 tb_switch_xhci_connect(sw);
1285         } else {
1286                 if (tb_port_is_null(port)) {
1287                         tb_port_dbg(port, "hotplug: scanning\n");
1288                         tb_scan_port(port);
1289                         if (!port->remote)
1290                                 tb_port_dbg(port, "hotplug: no switch found\n");
1291                 } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
1292                         tb_dp_resource_available(tb, port);
1293                 }
1294         }
1295
1296         pm_runtime_mark_last_busy(&sw->dev);
1297         pm_runtime_put_autosuspend(&sw->dev);
1298
1299 put_sw:
1300         tb_switch_put(sw);
1301 out:
1302         mutex_unlock(&tb->lock);
1303
1304         pm_runtime_mark_last_busy(&tb->dev);
1305         pm_runtime_put_autosuspend(&tb->dev);
1306
1307         kfree(ev);
1308 }
1309
1310 /*
1311  * tb_schedule_hotplug_handler() - callback function for the control channel
1312  *
1313  * Delegates to tb_handle_hotplug.
1314  */
1315 static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1316                             const void *buf, size_t size)
1317 {
1318         const struct cfg_event_pkg *pkg = buf;
1319         u64 route;
1320
1321         if (type != TB_CFG_PKG_EVENT) {
1322                 tb_warn(tb, "unexpected event %#x, ignoring\n", type);
1323                 return;
1324         }
1325
1326         route = tb_cfg_get_route(&pkg->header);
1327
1328         if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
1329                 tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
1330                         pkg->port);
1331         }
1332
1333         tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
1334 }
1335
1336 static void tb_stop(struct tb *tb)
1337 {
1338         struct tb_cm *tcm = tb_priv(tb);
1339         struct tb_tunnel *tunnel;
1340         struct tb_tunnel *n;
1341
1342         cancel_delayed_work(&tcm->remove_work);
1343         /* tunnels are only present after everything has been initialized */
1344         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1345                 /*
1346                  * DMA tunnels require the driver to be functional so we
1347                  * tear them down. Other protocol tunnels can be left
1348                  * intact.
1349                  */
1350                 if (tb_tunnel_is_dma(tunnel))
1351                         tb_tunnel_deactivate(tunnel);
1352                 tb_tunnel_free(tunnel);
1353         }
1354         tb_switch_remove(tb->root_switch);
1355         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1356 }
1357
1358 static int tb_scan_finalize_switch(struct device *dev, void *data)
1359 {
1360         if (tb_is_switch(dev)) {
1361                 struct tb_switch *sw = tb_to_switch(dev);
1362
1363                 /*
1364                  * If we found that the switch was already setup by the
1365                  * boot firmware, mark it as authorized now before we
1366                  * send uevent to userspace.
1367                  */
1368                 if (sw->boot)
1369                         sw->authorized = 1;
1370
1371                 dev_set_uevent_suppress(dev, false);
1372                 kobject_uevent(&dev->kobj, KOBJ_ADD);
1373                 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
1374         }
1375
1376         return 0;
1377 }
1378
1379 static int tb_start(struct tb *tb)
1380 {
1381         struct tb_cm *tcm = tb_priv(tb);
1382         int ret;
1383
1384         tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1385         if (IS_ERR(tb->root_switch))
1386                 return PTR_ERR(tb->root_switch);
1387
1388         /*
1389          * ICM firmware upgrade needs running firmware and in native
1390          * mode that is not available so disable firmware upgrade of the
1391          * root switch.
1392          */
1393         tb->root_switch->no_nvm_upgrade = true;
1394         /* All USB4 routers support runtime PM */
1395         tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
1396
1397         ret = tb_switch_configure(tb->root_switch);
1398         if (ret) {
1399                 tb_switch_put(tb->root_switch);
1400                 return ret;
1401         }
1402
1403         /* Announce the switch to the world */
1404         ret = tb_switch_add(tb->root_switch);
1405         if (ret) {
1406                 tb_switch_put(tb->root_switch);
1407                 return ret;
1408         }
1409
1410         tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
1411         /* Enable TMU if it is off */
1412         tb_switch_tmu_enable(tb->root_switch);
1413         /* Full scan to discover devices added before the driver was loaded. */
1414         tb_scan_switch(tb->root_switch);
1415         /* Find out tunnels created by the boot firmware */
1416         tb_discover_tunnels(tb);
1417         /*
1418          * If the boot firmware did not create USB 3.x tunnels create them
1419          * now for the whole topology.
1420          */
1421         tb_create_usb3_tunnels(tb->root_switch);
1422         /* Add DP IN resources for the root switch */
1423         tb_add_dp_resources(tb->root_switch);
1424         /* Make the discovered switches available to the userspace */
1425         device_for_each_child(&tb->root_switch->dev, NULL,
1426                               tb_scan_finalize_switch);
1427
1428         /* Allow tb_handle_hotplug to progress events */
1429         tcm->hotplug_active = true;
1430         return 0;
1431 }
1432
1433 static int tb_suspend_noirq(struct tb *tb)
1434 {
1435         struct tb_cm *tcm = tb_priv(tb);
1436
1437         tb_dbg(tb, "suspending...\n");
1438         tb_disconnect_and_release_dp(tb);
1439         tb_switch_suspend(tb->root_switch, false);
1440         tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
1441         tb_dbg(tb, "suspend finished\n");
1442
1443         return 0;
1444 }
1445
1446 static void tb_restore_children(struct tb_switch *sw)
1447 {
1448         struct tb_port *port;
1449
1450         /* No need to restore if the router is already unplugged */
1451         if (sw->is_unplugged)
1452                 return;
1453
1454         if (tb_switch_enable_clx(sw, TB_CL0S))
1455                 tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");
1456
1457         /*
1458          * tb_switch_tmu_configure() was already called when the switch was
1459          * added before entering system sleep or runtime suspend,
1460          * so no need to call it again before enabling TMU.
1461          */
1462         if (tb_enable_tmu(sw))
1463                 tb_sw_warn(sw, "failed to restore TMU configuration\n");
1464
1465         tb_switch_for_each_port(sw, port) {
1466                 if (!tb_port_has_remote(port) && !port->xdomain)
1467                         continue;
1468
1469                 if (port->remote) {
1470                         tb_switch_lane_bonding_enable(port->remote->sw);
1471                         tb_switch_configure_link(port->remote->sw);
1472
1473                         tb_restore_children(port->remote->sw);
1474                 } else if (port->xdomain) {
1475                         tb_port_configure_xdomain(port);
1476                 }
1477         }
1478 }
1479
1480 static int tb_resume_noirq(struct tb *tb)
1481 {
1482         struct tb_cm *tcm = tb_priv(tb);
1483         struct tb_tunnel *tunnel, *n;
1484         unsigned int usb3_delay = 0;
1485         LIST_HEAD(tunnels);
1486
1487         tb_dbg(tb, "resuming...\n");
1488
1489         /* remove any pci devices the firmware might have setup */
1490         tb_switch_reset(tb->root_switch);
1491
1492         tb_switch_resume(tb->root_switch);
1493         tb_free_invalid_tunnels(tb);
1494         tb_free_unplugged_children(tb->root_switch);
1495         tb_restore_children(tb->root_switch);
1496
1497         /*
1498          * If we get here from suspend to disk the boot firmware or the
1499          * restore kernel might have created tunnels of its own. Since
1500          * we cannot be sure they are usable for us we find and tear
1501          * them down.
1502          */
1503         tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
1504         list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
1505                 if (tb_tunnel_is_usb3(tunnel))
1506                         usb3_delay = 500;
1507                 tb_tunnel_deactivate(tunnel);
1508                 tb_tunnel_free(tunnel);
1509         }
1510
1511         /* Re-create our tunnels now */
1512         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
1513                 /* USB3 requires delay before it can be re-activated */
1514                 if (tb_tunnel_is_usb3(tunnel)) {
1515                         msleep(usb3_delay);
1516                         /* Only need to do it once */
1517                         usb3_delay = 0;
1518                 }
1519                 tb_tunnel_restart(tunnel);
1520         }
1521         if (!list_empty(&tcm->tunnel_list)) {
1522                 /*
1523                  * the pcie links need some time to get going.
1524                  * 100ms works for me...
1525                  */
1526                 tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
1527                 msleep(100);
1528         }
1529          /* Allow tb_handle_hotplug to progress events */
1530         tcm->hotplug_active = true;
1531         tb_dbg(tb, "resume finished\n");
1532
1533         return 0;
1534 }
1535
1536 static int tb_free_unplugged_xdomains(struct tb_switch *sw)
1537 {
1538         struct tb_port *port;
1539         int ret = 0;
1540
1541         tb_switch_for_each_port(sw, port) {
1542                 if (tb_is_upstream_port(port))
1543                         continue;
1544                 if (port->xdomain && port->xdomain->is_unplugged) {
1545                         tb_retimer_remove_all(port);
1546                         tb_xdomain_remove(port->xdomain);
1547                         tb_port_unconfigure_xdomain(port);
1548                         port->xdomain = NULL;
1549                         ret++;
1550                 } else if (port->remote) {
1551                         ret += tb_free_unplugged_xdomains(port->remote->sw);
1552                 }
1553         }
1554
1555         return ret;
1556 }
1557
1558 static int tb_freeze_noirq(struct tb *tb)
1559 {
1560         struct tb_cm *tcm = tb_priv(tb);
1561
1562         tcm->hotplug_active = false;
1563         return 0;
1564 }
1565
1566 static int tb_thaw_noirq(struct tb *tb)
1567 {
1568         struct tb_cm *tcm = tb_priv(tb);
1569
1570         tcm->hotplug_active = true;
1571         return 0;
1572 }
1573
1574 static void tb_complete(struct tb *tb)
1575 {
1576         /*
1577          * Release any unplugged XDomains and if there is a case where
1578          * another domain is swapped in place of unplugged XDomain we
1579          * need to run another rescan.
1580          */
1581         mutex_lock(&tb->lock);
1582         if (tb_free_unplugged_xdomains(tb->root_switch))
1583                 tb_scan_switch(tb->root_switch);
1584         mutex_unlock(&tb->lock);
1585 }
1586
1587 static int tb_runtime_suspend(struct tb *tb)
1588 {
1589         struct tb_cm *tcm = tb_priv(tb);
1590
1591         mutex_lock(&tb->lock);
1592         tb_switch_suspend(tb->root_switch, true);
1593         tcm->hotplug_active = false;
1594         mutex_unlock(&tb->lock);
1595
1596         return 0;
1597 }
1598
1599 static void tb_remove_work(struct work_struct *work)
1600 {
1601         struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
1602         struct tb *tb = tcm_to_tb(tcm);
1603
1604         mutex_lock(&tb->lock);
1605         if (tb->root_switch) {
1606                 tb_free_unplugged_children(tb->root_switch);
1607                 tb_free_unplugged_xdomains(tb->root_switch);
1608         }
1609         mutex_unlock(&tb->lock);
1610 }
1611
1612 static int tb_runtime_resume(struct tb *tb)
1613 {
1614         struct tb_cm *tcm = tb_priv(tb);
1615         struct tb_tunnel *tunnel, *n;
1616
1617         mutex_lock(&tb->lock);
1618         tb_switch_resume(tb->root_switch);
1619         tb_free_invalid_tunnels(tb);
1620         tb_restore_children(tb->root_switch);
1621         list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
1622                 tb_tunnel_restart(tunnel);
1623         tcm->hotplug_active = true;
1624         mutex_unlock(&tb->lock);
1625
1626         /*
1627          * Schedule cleanup of any unplugged devices. Run this in a
1628          * separate thread to avoid possible deadlock if the device
1629          * removal runtime resumes the unplugged device.
1630          */
1631         queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
1632         return 0;
1633 }
1634
1635 static const struct tb_cm_ops tb_cm_ops = {
1636         .start = tb_start,
1637         .stop = tb_stop,
1638         .suspend_noirq = tb_suspend_noirq,
1639         .resume_noirq = tb_resume_noirq,
1640         .freeze_noirq = tb_freeze_noirq,
1641         .thaw_noirq = tb_thaw_noirq,
1642         .complete = tb_complete,
1643         .runtime_suspend = tb_runtime_suspend,
1644         .runtime_resume = tb_runtime_resume,
1645         .handle_event = tb_handle_event,
1646         .disapprove_switch = tb_disconnect_pci,
1647         .approve_switch = tb_tunnel_pci,
1648         .approve_xdomain_paths = tb_approve_xdomain_paths,
1649         .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
1650 };
1651
1652 /*
1653  * During suspend the Thunderbolt controller is reset and all PCIe
1654  * tunnels are lost. The NHI driver will try to reestablish all tunnels
1655  * during resume. This adds device links between the tunneled PCIe
1656  * downstream ports and the NHI so that the device core will make sure
1657  * NHI is resumed first before the rest.
1658  */
1659 static void tb_apple_add_links(struct tb_nhi *nhi)
1660 {
1661         struct pci_dev *upstream, *pdev;
1662
1663         if (!x86_apple_machine)
1664                 return;
1665
1666         switch (nhi->pdev->device) {
1667         case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1668         case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1669         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1670         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1671                 break;
1672         default:
1673                 return;
1674         }
1675
1676         upstream = pci_upstream_bridge(nhi->pdev);
1677         while (upstream) {
1678                 if (!pci_is_pcie(upstream))
1679                         return;
1680                 if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
1681                         break;
1682                 upstream = pci_upstream_bridge(upstream);
1683         }
1684
1685         if (!upstream)
1686                 return;
1687
1688         /*
1689          * For each hotplug downstream port, create add device link
1690          * back to NHI so that PCIe tunnels can be re-established after
1691          * sleep.
1692          */
1693         for_each_pci_bridge(pdev, upstream->subordinate) {
1694                 const struct device_link *link;
1695
1696                 if (!pci_is_pcie(pdev))
1697                         continue;
1698                 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
1699                     !pdev->is_hotplug_bridge)
1700                         continue;
1701
1702                 link = device_link_add(&pdev->dev, &nhi->pdev->dev,
1703                                        DL_FLAG_AUTOREMOVE_SUPPLIER |
1704                                        DL_FLAG_PM_RUNTIME);
1705                 if (link) {
1706                         dev_dbg(&nhi->pdev->dev, "created link from %s\n",
1707                                 dev_name(&pdev->dev));
1708                 } else {
1709                         dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
1710                                  dev_name(&pdev->dev));
1711                 }
1712         }
1713 }
1714
1715 struct tb *tb_probe(struct tb_nhi *nhi)
1716 {
1717         struct tb_cm *tcm;
1718         struct tb *tb;
1719
1720         tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
1721         if (!tb)
1722                 return NULL;
1723
1724         if (tb_acpi_may_tunnel_pcie())
1725                 tb->security_level = TB_SECURITY_USER;
1726         else
1727                 tb->security_level = TB_SECURITY_NOPCIE;
1728
1729         tb->cm_ops = &tb_cm_ops;
1730
1731         tcm = tb_priv(tb);
1732         INIT_LIST_HEAD(&tcm->tunnel_list);
1733         INIT_LIST_HEAD(&tcm->dp_resources);
1734         INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
1735
1736         tb_dbg(tb, "using software connection manager\n");
1737
1738         tb_apple_add_links(nhi);
1739         tb_acpi_add_links(nhi);
1740
1741         return tb;
1742 }