GNU Linux-libre 4.14.328-gnu1
drivers/thunderbolt/icm.c
/*
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD                    0x30
#define PCIE2CIO_CMD_TIMEOUT            BIT(31)
#define PCIE2CIO_CMD_START              BIT(30)
#define PCIE2CIO_CMD_WRITE              BIT(21)
#define PCIE2CIO_CMD_CS_MASK            GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT           19
#define PCIE2CIO_CMD_PORT_MASK          GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT         13

#define PCIE2CIO_WRDATA                 0x34
#define PCIE2CIO_RDDATA                 0x38
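
/*
 * Layout of the PCIE2CIO_CMD register word, assembled from the masks
 * above. This is a reading aid inferred from the definitions and from
 * pcie2cio_read()/pcie2cio_write() below, not authoritative hardware
 * documentation:
 *
 *   bit 31       TIMEOUT - set by hardware when the operation timed out
 *   bit 30       START   - written by software to start the operation
 *   bit 21       WRITE   - 1 = write (data in WRDATA), 0 = read (RDDATA)
 *   bits 20..19  CS      - target config space (enum tb_cfg_space)
 *   bits 18..13  PORT    - CIO port number
 *   low bits             - dword index within the config space (width
 *                          inferred from PORT starting at bit 13)
 */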

#define PHY_PORT_CS1                    0x37
#define PHY_PORT_CS1_LINK_DISABLE       BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK    GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT   26

#define ICM_TIMEOUT                     5000 /* ms */
#define ICM_MAX_LINK                    4
#define ICM_MAX_DEPTH                   6

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to the ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *                 controller is connected to. This is only set for systems
 *                 where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where the PCIe2CIO mailbox resides
 *           (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 */
struct icm {
        struct mutex request_lock;
        struct delayed_work rescan_work;
        struct pci_dev *upstream_port;
        int vnd_cap;
        bool safe_mode;
        bool (*is_supported)(struct tb *tb);
        int (*get_mode)(struct tb *tb);
        int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
        void (*device_connected)(struct tb *tb,
                                 const struct icm_pkg_header *hdr);
        void (*device_disconnected)(struct tb *tb,
                                    const struct icm_pkg_header *hdr);
};

struct icm_notification {
        struct work_struct work;
        struct icm_pkg_header *pkg;
        struct tb *tb;
};

static inline struct tb *icm_to_tb(struct icm *icm)
{
        return ((void *)icm - sizeof(struct tb));
}
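
/*
 * A note on the pointer arithmetic above (a sketch relying on how
 * tb_domain_alloc() lays out its allocation): the domain is allocated as
 * one block with the connection manager private data placed directly
 * after struct tb, roughly
 *
 *      tb = kzalloc(sizeof(*tb) + sizeof(struct icm), GFP_KERNEL);
 *
 * so tb_priv(tb) points just past struct tb and icm_to_tb() is simply
 * the inverse of that.
 */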

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
        return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8));
}

static inline u8 dual_link_from_link(u8 link)
{
        return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
        return (u64)route_hi << 32 | route_lo;
}
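
/*
 * Worked example of the helpers above (illustrative values only): a route
 * string packs one port number per hop, one byte per depth level, with
 * the most recent hop in the lowest byte. A switch at depth 2 reached
 * through port 5 of the root switch and then port 3 has route 0x0503, so
 * phy_port_from_route(0x0503, 2) shifts right by (2 - 1) * 8 to recover
 * 0x05, the port on the root switch, before converting that link to a
 * physical port number. dual_link_from_link() maps a link to its
 * dual-link partner: 1 <-> 2 and 3 <-> 4, with 0 staying 0.
 */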

static bool icm_match(const struct tb_cfg_request *req,
                      const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *res_hdr = pkg->buffer;
        const struct icm_pkg_header *req_hdr = req->request;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (res_hdr->code != req_hdr->code)
                return false;

        return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *hdr = pkg->buffer;

        if (hdr->packet_id < req->npackets) {
                size_t offset = hdr->packet_id * req->response_size;

                memcpy(req->response + offset, pkg->buffer, req->response_size);
        }

        return hdr->packet_id == hdr->total_packets - 1;
}
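
/*
 * Example of the reassembly arithmetic in icm_copy() (illustrative
 * numbers): a response split into total_packets == 3 packets with
 * response_size == 256 lands at offsets 0, 256 and 512 of req->response,
 * keyed by packet_id. The function returns true only for the final
 * packet (packet_id == 2), which tells the control channel that the
 * request is complete.
 */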

static int icm_request(struct tb *tb, const void *request, size_t request_size,
                       void *response, size_t response_size, size_t npackets,
                       unsigned int timeout_msec)
{
        struct icm *icm = tb_priv(tb);
        int retries = 3;

        do {
                struct tb_cfg_request *req;
                struct tb_cfg_result res;

                req = tb_cfg_request_alloc();
                if (!req)
                        return -ENOMEM;

                req->match = icm_match;
                req->copy = icm_copy;
                req->request = request;
                req->request_size = request_size;
                req->request_type = TB_CFG_PKG_ICM_CMD;
                req->response = response;
                req->npackets = npackets;
                req->response_size = response_size;
                req->response_type = TB_CFG_PKG_ICM_RESP;

                mutex_lock(&icm->request_lock);
                res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
                mutex_unlock(&icm->request_lock);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        return res.err == 1 ? -EIO : res.err;

                usleep_range(20, 50);
        } while (retries--);

        return -ETIMEDOUT;
}
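
/*
 * Every ICM message in this file follows the same request/reply pattern.
 * A minimal sketch, mirroring __icm_driver_ready() and friends (the
 * packet types come from the tb message definitions):
 *
 *      struct icm_pkg_driver_ready_response reply;
 *      struct icm_pkg_driver_ready request = {
 *              .hdr.code = ICM_DRIVER_READY,
 *      };
 *      int ret;
 *
 *      memset(&reply, 0, sizeof(reply));
 *      ret = icm_request(tb, &request, sizeof(request), &reply,
 *                        sizeof(reply), 1, ICM_TIMEOUT);
 *      if (ret)
 *              return ret;
 *      if (reply.hdr.flags & ICM_FLAGS_ERROR)
 *              return -EIO;
 */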

static bool icm_fr_is_supported(struct tb *tb)
{
        return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
        int index;

        if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
                return 0;

        index = port >> ICM_PORT_INDEX_SHIFT;
        return index != 0xff ? index : 0;
}

static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_fr_pkg_get_topology_response *switches, *sw;
        struct icm_fr_pkg_get_topology request = {
                .hdr = { .code = ICM_GET_TOPOLOGY },
        };
        size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
        int ret, index;
        u8 i;

        switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
        if (!switches)
                return -ENOMEM;

        ret = icm_request(tb, &request, sizeof(request), switches,
                          sizeof(*switches), npackets, ICM_TIMEOUT);
        if (ret)
                goto err_free;

        sw = &switches[0];
        index = icm_fr_get_switch_index(sw->ports[link]);
        if (!index) {
                ret = -ENODEV;
                goto err_free;
        }

        sw = &switches[index];
        for (i = 1; i < depth; i++) {
                unsigned int j;

                if (!(sw->first_data & ICM_SWITCH_USED)) {
                        ret = -ENODEV;
                        goto err_free;
                }

                for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
                        index = icm_fr_get_switch_index(sw->ports[j]);
                        if (index > sw->switch_index) {
                                sw = &switches[index];
                                break;
                        }
                }
        }

        *route = get_route(sw->route_hi, sw->route_lo);

err_free:
        kfree(switches);
        return ret;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_approve_device request;
        struct icm_fr_pkg_approve_device reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_APPROVE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;

        memset(&reply, 0, sizeof(reply));
        /* Use larger timeout as establishing tunnels can take some time */
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, 10000);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "PCIe tunnel creation failed\n");
                return -EIO;
        }

        return 0;
}
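
/*
 * Approval is driven from user space: writing to a switch's sysfs
 * "authorized" attribute reaches this function through
 * tb->cm_ops->approve_switch, after which the ICM firmware establishes
 * the PCIe tunnel to the device.
 */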

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_add_device_key request;
        struct icm_fr_pkg_add_device_key_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_ADD_DEVICE_KEY;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "Adding key to switch failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                                       const u8 *challenge, u8 *response)
{
        struct icm_fr_pkg_challenge_device request;
        struct icm_fr_pkg_challenge_device_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_CHALLENGE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EKEYREJECTED;
        if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
                return -ENOKEY;

        memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

        return 0;
}
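
/*
 * The two functions above back the "secure connect" security level (a
 * rough sketch of the flow as used by the domain code): on first connect
 * a key is stored on the device with add_switch_key; on later connects
 * the domain sends a random challenge and the device must answer with a
 * response derived from the stored key (challenge_switch_key). -ENOKEY
 * signals that the device has no key stored yet, so one needs to be
 * added before challenging.
 */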

static void remove_switch(struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        parent_sw = tb_to_switch(sw->dev.parent);
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
        tb_switch_remove(sw);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_connected *pkg =
                (const struct icm_fr_event_device_connected *)hdr;
        struct tb_switch *sw, *parent_sw;
        struct icm *icm = tb_priv(tb);
        bool authorized = false;
        u8 link, depth;
        u64 route;
        int ret;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;

        ret = icm->get_route(tb, link, depth, &route);
        if (ret) {
                tb_err(tb, "failed to find route string for switch at %u.%u\n",
                       link, depth);
                return;
        }

        sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
        if (sw) {
                u8 phy_port, sw_phy_port;

                parent_sw = tb_to_switch(sw->dev.parent);
                sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
                phy_port = phy_port_from_route(route, depth);

                /*
                 * On resume ICM will send us connected events for the
                 * devices that are still present. However, that
                 * information might have changed, for example a switch
                 * on a dual-link connection might now have been
                 * enumerated using the other link. Make sure our
                 * bookkeeping matches that.
                 */
                if (sw->depth == depth && sw_phy_port == phy_port &&
                    !!sw->authorized == authorized) {
                        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
                        tb_port_at(route, parent_sw)->remote =
                                   tb_upstream_port(sw);
                        sw->config.route_hi = upper_32_bits(route);
                        sw->config.route_lo = lower_32_bits(route);
                        sw->connection_id = pkg->connection_id;
                        sw->connection_key = pkg->connection_key;
                        sw->link = link;
                        sw->depth = depth;
                        sw->is_unplugged = false;
                        tb_switch_put(sw);
                        return;
                }

                /*
                 * User connected the same switch to another physical
                 * port or to another part of the topology. Remove the
                 * existing switch now before adding the new one.
                 */
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /*
         * If the switch was not found by UUID, look for a switch on the
         * same physical port (taking possible link aggregation into
         * account) and at the same depth. If we find one it is definitely
         * a stale one, so remove it first.
         */
        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
        }
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
        if (!parent_sw) {
                tb_err(tb, "failed to find parent switch for %u.%u\n",
                       link, depth);
                return;
        }

        sw = tb_switch_alloc(tb, &parent_sw->dev, route);
        if (!sw) {
                tb_switch_put(parent_sw);
                return;
        }

        sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL);
        sw->connection_id = pkg->connection_id;
        sw->connection_key = pkg->connection_key;
        sw->link = link;
        sw->depth = depth;
        sw->authorized = authorized;
        sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
                                ICM_FLAGS_SLEVEL_SHIFT;

        /* Link the two switches now */
        tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
        tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

        ret = tb_switch_add(sw);
        if (ret) {
                tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
                tb_switch_put(sw);
        }
        tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_disconnected *pkg =
                (const struct icm_fr_event_device_disconnected *)hdr;
        struct tb_switch *sw;
        u8 link, depth;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;

        if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
}

static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
        struct pci_dev *parent;

        parent = pci_upstream_bridge(pdev);
        while (parent) {
                if (!pci_is_pcie(parent))
                        return NULL;
                if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
                        break;
                parent = pci_upstream_bridge(parent);
        }

        if (!parent)
                return NULL;

        switch (parent->device) {
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
                return parent;
        }

        return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
        struct pci_dev *upstream_port;
        struct icm *icm = tb_priv(tb);

        /*
         * Starting from Alpine Ridge we can use ICM on Apple machines
         * as well. We just need to reset and re-enable it first.
         */
        if (!x86_apple_machine)
                return true;

        /*
         * Find the upstream PCIe port in case we need to do reset
         * through its vendor specific registers.
         */
        upstream_port = get_upstream_port(tb->nhi->pdev);
        if (upstream_port) {
                int cap;

                cap = pci_find_ext_capability(upstream_port,
                                              PCI_EXT_CAP_ID_VNDR);
                if (cap > 0) {
                        icm->upstream_port = upstream_port;
                        icm->vnd_cap = cap;

                        return true;
                }
        }

        return false;
}

static int icm_ar_get_mode(struct tb *tb)
{
        struct tb_nhi *nhi = tb->nhi;
        int retries = 60;
        u32 val;

        do {
                val = ioread32(nhi->iobase + REG_FW_STS);
                if (val & REG_FW_STS_NVM_AUTH_DONE)
                        break;
                msleep(50);
        } while (--retries);

        if (!retries) {
                dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
                return -ENODEV;
        }

        return nhi_mailbox_mode(nhi);
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_ar_pkg_get_route_response reply;
        struct icm_ar_pkg_get_route request = {
                .hdr = { .code = ICM_GET_ROUTE },
                .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        *route = get_route(reply.route_hi, reply.route_lo);
        return 0;
}

static void icm_handle_notification(struct work_struct *work)
{
        struct icm_notification *n = container_of(work, typeof(*n), work);
        struct tb *tb = n->tb;
        struct icm *icm = tb_priv(tb);

        mutex_lock(&tb->lock);

        switch (n->pkg->code) {
        case ICM_EVENT_DEVICE_CONNECTED:
                icm->device_connected(tb, n->pkg);
                break;
        case ICM_EVENT_DEVICE_DISCONNECTED:
                icm->device_disconnected(tb, n->pkg);
                break;
        }

        mutex_unlock(&tb->lock);

        kfree(n->pkg);
        kfree(n);
}

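/*
 * Events arrive through the control channel RX path, so the packet is
 * copied here and the real handling is deferred to the domain workqueue
 * above, where tb->lock can be taken safely.
 */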
static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                             const void *buf, size_t size)
{
        struct icm_notification *n;

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return;

        INIT_WORK(&n->work, icm_handle_notification);
        n->pkg = kmemdup(buf, size, GFP_KERNEL);
        if (!n->pkg) {
                kfree(n);
                return;
        }
        n->tb = tb;

        queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
{
        struct icm_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        unsigned int retries = 10;
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.security_level & 0xf;

        /*
         * Hold on here until the switch config space is accessible so
         * that we can read root switch config successfully.
         */
        do {
                struct tb_cfg_result res;
                u32 tmp;

                res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
                                      0, 1, 100);
                if (!res.err)
                        return 0;

                msleep(50);
        } while (--retries);

        return -ETIMEDOUT;
}

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
        u32 cmd;

        do {
                pci_read_config_dword(icm->upstream_port,
                                      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
                if (!(cmd & PCIE2CIO_CMD_START)) {
                        if (cmd & PCIE2CIO_CMD_TIMEOUT)
                                break;
                        return 0;
                }

                msleep(50);
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
                         unsigned int port, unsigned int index, u32 *data)
{
        struct pci_dev *pdev = icm->upstream_port;
        int ret, vnd_cap = icm->vnd_cap;
        u32 cmd;

        cmd = index;
        cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
        cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
        cmd |= PCIE2CIO_CMD_START;
        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

        ret = pci2cio_wait_completion(icm, 5000);
        if (ret)
                return ret;

        pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
        return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
                          unsigned int port, unsigned int index, u32 data)
{
        struct pci_dev *pdev = icm->upstream_port;
        int vnd_cap = icm->vnd_cap;
        u32 cmd;

        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

        cmd = index;
        cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
        cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
        cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

        return pci2cio_wait_completion(icm, 5000);
}

static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
        struct icm *icm = tb_priv(tb);
        u32 val;

        if (!icm->upstream_port)
                return -ENODEV;

        /* Put ARC to wait for CIO reset event to happen */
        val = ioread32(nhi->iobase + REG_FW_STS);
        val |= REG_FW_STS_CIO_RESET_REQ;
        iowrite32(val, nhi->iobase + REG_FW_STS);

        /* Re-start ARC */
        val = ioread32(nhi->iobase + REG_FW_STS);
        val |= REG_FW_STS_ICM_EN_INVERT;
        val |= REG_FW_STS_ICM_EN_CPU;
        iowrite32(val, nhi->iobase + REG_FW_STS);

        /* Trigger CIO reset now */
        return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
        unsigned int retries = 10;
        int ret;
        u32 val;

        /* Check if the ICM firmware is already running */
        val = ioread32(nhi->iobase + REG_FW_STS);
        if (val & REG_FW_STS_ICM_EN)
                return 0;

        dev_info(&nhi->pdev->dev, "starting ICM firmware\n");

        ret = icm_firmware_reset(tb, nhi);
        if (ret)
                return ret;

        /* Wait until the ICM firmware tells us it is up and running */
        do {
                /* Check that the ICM firmware is running */
                val = ioread32(nhi->iobase + REG_FW_STS);
                if (val & REG_FW_STS_NVM_AUTH_DONE)
                        return 0;

                msleep(300);
        } while (--retries);

        return -ETIMEDOUT;
}

static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
        struct icm *icm = tb_priv(tb);
        u32 state0, state1;
        int port0, port1;
        u32 val0, val1;
        int ret;

        if (!icm->upstream_port)
                return 0;

        if (phy_port) {
                port0 = 3;
                port1 = 4;
        } else {
                port0 = 1;
                port1 = 2;
        }

        /*
         * Read link status of both null ports belonging to a single
         * physical port.
         */
        ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
        if (ret)
                return ret;
        ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
        if (ret)
                return ret;

        state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
        state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
        state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
        state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

        /* If they are both up we need to reset them now */
        if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
                return 0;

        val0 |= PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
        if (ret)
                return ret;

        val1 |= PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
        if (ret)
                return ret;

        /* Wait a bit and then re-enable both ports */
        usleep_range(10, 100);

        ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
        if (ret)
                return ret;
        ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
        if (ret)
                return ret;

        val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
        ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
        if (ret)
                return ret;

        val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
        return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}
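
/*
 * Mapping used above, as encoded in the port0/port1 assignment: each
 * physical Thunderbolt port consists of two CIO "null" ports, one per
 * link. Physical port 0 uses null ports 1 and 2, physical port 1 uses
 * null ports 3 and 4, which is also why dual_link_from_link() pairs
 * links 1 <-> 2 and 3 <-> 4.
 */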

static int icm_firmware_init(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        struct tb_nhi *nhi = tb->nhi;
        int ret;

        ret = icm_firmware_start(tb, nhi);
        if (ret) {
                dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
                return ret;
        }

        if (icm->get_mode) {
                ret = icm->get_mode(tb);

                switch (ret) {
                case NHI_FW_SAFE_MODE:
                        icm->safe_mode = true;
                        break;

                case NHI_FW_CM_MODE:
                        /* Ask ICM to accept all Thunderbolt devices */
                        nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
                        break;

                default:
                        if (ret < 0)
                                return ret;

                        tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
                        return -ENODEV;
                }
        }

        /*
         * Reset both physical ports if there is anything connected to
         * them already.
         */
        ret = icm_reset_phy_port(tb, 0);
        if (ret)
                dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
        ret = icm_reset_phy_port(tb, 1);
        if (ret)
                dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

        return 0;
}

static int icm_driver_ready(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        int ret;

        ret = icm_firmware_init(tb);
        if (ret)
                return ret;

        if (icm->safe_mode) {
                tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
                tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
                tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
                return 0;
        }

        return __icm_driver_ready(tb, &tb->security_level);
}

static int icm_suspend(struct tb *tb)
{
        int ret;

        ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
        if (ret)
                tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
                        ret, __func__);

        return 0;
}

/*
 * Mark all switches (except the root switch) below this one unplugged. The
 * ICM firmware will send us an updated list of switches after we have sent
 * it the driver ready command. If a switch is not in that list it will be
 * removed when we perform the rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
{
        unsigned int i;

        if (tb_route(sw))
                sw->is_unplugged = true;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];

                if (tb_is_upstream_port(port))
                        continue;
                if (!port->remote)
                        continue;

                icm_unplug_children(port->remote->sw);
        }
}

static void icm_free_unplugged_children(struct tb_switch *sw)
{
        unsigned int i;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];

                if (tb_is_upstream_port(port))
                        continue;
                if (!port->remote)
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                } else {
                        icm_free_unplugged_children(port->remote->sw);
                }
        }
}

static void icm_rescan_work(struct work_struct *work)
{
        struct icm *icm = container_of(work, struct icm, rescan_work.work);
        struct tb *tb = icm_to_tb(icm);

        mutex_lock(&tb->lock);
        if (tb->root_switch)
                icm_free_unplugged_children(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static void icm_complete(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        if (tb->nhi->going_away)
                return;

        icm_unplug_children(tb->root_switch);

        /*
         * Now all existing children should be resumed, start events
         * from ICM to get updated status.
         */
        __icm_driver_ready(tb, NULL);

        /*
         * We do not get notifications of devices that have been
         * unplugged during suspend so schedule rescan to clean them up
         * if any.
         */
        queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}
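
/*
 * The resume sequence thus goes: icm_complete() marks every child switch
 * unplugged and re-sends the driver ready command; the connected events
 * that follow clear is_unplugged on the devices still present (see
 * icm_fr_device_connected()); the delayed icm_rescan_work() then frees
 * whatever remained marked unplugged.
 */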

static int icm_start(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);
        int ret;

        if (icm->safe_mode)
                tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
        else
                tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (!tb->root_switch)
                return -ENODEV;

        /*
         * NVM upgrade has not been tested on Apple systems and they
         * don't provide images publicly either. To be on the safe side
         * prevent root switch NVM upgrade on Macs for now.
         */
        tb->root_switch->no_nvm_upgrade = x86_apple_machine;

        ret = tb_switch_add(tb->root_switch);
        if (ret)
                tb_switch_put(tb->root_switch);

        return ret;
}

static void icm_stop(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        cancel_delayed_work(&icm->rescan_work);
        tb_switch_remove(tb->root_switch);
        tb->root_switch = NULL;
        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}

static int icm_disconnect_pcie_paths(struct tb *tb)
{
        return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}

/* Falcon Ridge and Alpine Ridge */
static const struct tb_cm_ops icm_fr_ops = {
        .driver_ready = icm_driver_ready,
        .start = icm_start,
        .stop = icm_stop,
        .suspend = icm_suspend,
        .complete = icm_complete,
        .handle_event = icm_handle_event,
        .approve_switch = icm_fr_approve_switch,
        .add_switch_key = icm_fr_add_switch_key,
        .challenge_switch_key = icm_fr_challenge_switch_key,
        .disconnect_pcie_paths = icm_disconnect_pcie_paths,
};

struct tb *icm_probe(struct tb_nhi *nhi)
{
        struct icm *icm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, sizeof(struct icm));
        if (!tb)
                return NULL;

        icm = tb_priv(tb);
        INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
        mutex_init(&icm->request_lock);

        switch (nhi->pdev->device) {
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
                icm->is_supported = icm_fr_is_supported;
                icm->get_route = icm_fr_get_route;
                icm->device_connected = icm_fr_device_connected;
                icm->device_disconnected = icm_fr_device_disconnected;
                tb->cm_ops = &icm_fr_ops;
                break;

        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
                icm->is_supported = icm_ar_is_supported;
                icm->get_mode = icm_ar_get_mode;
                icm->get_route = icm_ar_get_route;
                icm->device_connected = icm_fr_device_connected;
                icm->device_disconnected = icm_fr_device_disconnected;
                tb->cm_ops = &icm_fr_ops;
                break;
        }

        if (!icm->is_supported || !icm->is_supported(tb)) {
                dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
                tb_domain_put(tb);
                return NULL;
        }

        return tb;
}