// SPDX-License-Identifier: GPL-2.0
/*
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD                    0x30
#define PCIE2CIO_CMD_TIMEOUT            BIT(31)
#define PCIE2CIO_CMD_START              BIT(30)
#define PCIE2CIO_CMD_WRITE              BIT(21)
#define PCIE2CIO_CMD_CS_MASK            GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT           19
#define PCIE2CIO_CMD_PORT_MASK          GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT         13

#define PCIE2CIO_WRDATA                 0x34
#define PCIE2CIO_RDDATA                 0x38

#define PHY_PORT_CS1                    0x37
#define PHY_PORT_CS1_LINK_DISABLE       BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK    GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT   26

#define ICM_TIMEOUT                     5000    /* ms */
#define ICM_APPROVE_TIMEOUT             10000   /* ms */
#define ICM_MAX_LINK                    4

static bool start_icm;
module_param(start_icm, bool, 0444);
MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *                 controller is connected to. This is only set for systems
 *                 where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *           (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
 * @veto: Is RTD3 veto in effect
 * @is_supported: Checks if we can support ICM on this controller
 * @cio_reset: Trigger CIO reset
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @set_uuid: Set UUID for the root switch (optional)
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 * @rtd3_veto: Handle RTD3 veto notification ICM message
 */
struct icm {
        struct mutex request_lock;
        struct delayed_work rescan_work;
        struct pci_dev *upstream_port;
        size_t max_boot_acl;
        int vnd_cap;
        bool safe_mode;
        bool rpm;
        bool can_upgrade_nvm;
        bool veto;
        bool (*is_supported)(struct tb *tb);
        int (*cio_reset)(struct tb *tb);
        int (*get_mode)(struct tb *tb);
        int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
        void (*save_devices)(struct tb *tb);
        int (*driver_ready)(struct tb *tb,
                            enum tb_security_level *security_level,
                            size_t *nboot_acl, bool *rpm);
        void (*set_uuid)(struct tb *tb);
        void (*device_connected)(struct tb *tb,
                                 const struct icm_pkg_header *hdr);
        void (*device_disconnected)(struct tb *tb,
                                    const struct icm_pkg_header *hdr);
        void (*xdomain_connected)(struct tb *tb,
                                  const struct icm_pkg_header *hdr);
        void (*xdomain_disconnected)(struct tb *tb,
                                     const struct icm_pkg_header *hdr);
        void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
};

struct icm_notification {
        struct work_struct work;
        struct icm_pkg_header *pkg;
        struct tb *tb;
};

struct ep_name_entry {
        u8 len;
        u8 type;
        u8 data[];
};

#define EP_NAME_INTEL_VSS       0x10

/* Intel vendor-specific structure */
struct intel_vss {
        u16 vendor;
        u16 model;
        u8 mc;
        u8 flags;
        u16 pci_devid;
        u32 nvm_version;
};

#define INTEL_VSS_FLAGS_RTD3    BIT(0)

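/*
 * The EP name is a packed list of { len, type, data[] } entries. Walk
 * it until the Intel VSS entry is found, stopping early on a zero
 * length or an entry that would run past the end of the buffer.
 * Judging by how the walk below advances, the len field presumably
 * covers the whole entry, header bytes included.
 */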
static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
        const void *end = ep_name + size;

        while (ep_name < end) {
                const struct ep_name_entry *ep = ep_name;

                if (!ep->len)
                        break;
                if (ep_name + ep->len > end)
                        break;

                if (ep->type == EP_NAME_INTEL_VSS)
                        return (const struct intel_vss *)ep->data;

                ep_name += ep->len;
        }

        return NULL;
}

static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
{
        const struct intel_vss *vss;

        vss = parse_intel_vss(ep_name, size);
        if (vss)
                return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

        return false;
}

static inline struct tb *icm_to_tb(struct icm *icm)
{
        return ((void *)icm - sizeof(struct tb));
}

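/*
 * A route string stores one link (port) number per hop, 8 bits per
 * depth level. For example, at depth 2 the link of the last hop lives
 * in byte 1, so route 0x0201 yields link 0x02, which is then converted
 * to the physical port number.
 */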
static inline u8 phy_port_from_route(u64 route, u8 depth)
{
        u8 link;

        link = depth ? route >> ((depth - 1) * 8) : route;
        return tb_phy_port_from_link(link);
}

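/*
 * Map a link to the other link of the same dual-link pair: links 1 and
 * 2 share a physical port, as do links 3 and 4. The XOR below flips
 * the low bit of the zero-based link number, so 1 <-> 2 and 3 <-> 4,
 * while 0 (no link) maps to 0.
 */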
static inline u8 dual_link_from_link(u8 link)
{
        return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
        return (u64)route_hi << 32 | route_lo;
}

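/*
 * The parent route is the same route string with the last hop cleared.
 * For example, route 0x030201 has depth 3, so masking off byte 2
 * ((depth - 1) * TB_ROUTE_SHIFT bits up) gives parent route 0x0201.
 */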
static inline u64 get_parent_route(u64 route)
{
        int depth = tb_route_length(route);
        return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

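/*
 * PCIe2CIO mailbox: a command is started by writing PCIE2CIO_CMD with
 * the START bit set. The hardware clears START when it is done and
 * sets TIMEOUT if the CIO access itself timed out. Poll here until one
 * of those happens or our own software timeout expires.
 */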
static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
        u32 cmd;

        do {
                pci_read_config_dword(icm->upstream_port,
                                      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
                if (!(cmd & PCIE2CIO_CMD_START)) {
                        if (cmd & PCIE2CIO_CMD_TIMEOUT)
                                break;
                        return 0;
                }

                msleep(50);
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
                         unsigned int port, unsigned int index, u32 *data)
{
        struct pci_dev *pdev = icm->upstream_port;
        int ret, vnd_cap = icm->vnd_cap;
        u32 cmd;

        cmd = index;
        cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
        cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
        cmd |= PCIE2CIO_CMD_START;
        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

        ret = pci2cio_wait_completion(icm, 5000);
        if (ret)
                return ret;

        pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
        return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
                          unsigned int port, unsigned int index, u32 data)
{
        struct pci_dev *pdev = icm->upstream_port;
        int vnd_cap = icm->vnd_cap;
        u32 cmd;

        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

        cmd = index;
        cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
        cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
        cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
        pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

        return pci2cio_wait_completion(icm, 5000);
}

static bool icm_match(const struct tb_cfg_request *req,
                      const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *res_hdr = pkg->buffer;
        const struct icm_pkg_header *req_hdr = req->request;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (res_hdr->code != req_hdr->code)
                return false;

        return true;
}

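/*
 * An ICM response can span several packets (frames). Each frame
 * carries a packet_id that is used as an index into the response
 * buffer, and the copy is considered complete once the last of
 * total_packets has been seen.
 */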
static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        const struct icm_pkg_header *hdr = pkg->buffer;

        if (hdr->packet_id < req->npackets) {
                size_t offset = hdr->packet_id * req->response_size;

                memcpy(req->response + offset, pkg->buffer, req->response_size);
        }

        return hdr->packet_id == hdr->total_packets - 1;
}

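/*
 * Send a request to the ICM and wait for the response. A timed out
 * request is retried, four attempts in total. A positive res.err of 1
 * indicates an ICM-level failure and is mapped to -EIO. The
 * request_lock serializes messages so only one is in flight at a time.
 */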
static int icm_request(struct tb *tb, const void *request, size_t request_size,
                       void *response, size_t response_size, size_t npackets,
                       unsigned int timeout_msec)
{
        struct icm *icm = tb_priv(tb);
        int retries = 3;

        do {
                struct tb_cfg_request *req;
                struct tb_cfg_result res;

                req = tb_cfg_request_alloc();
                if (!req)
                        return -ENOMEM;

                req->match = icm_match;
                req->copy = icm_copy;
                req->request = request;
                req->request_size = request_size;
                req->request_type = TB_CFG_PKG_ICM_CMD;
                req->response = response;
                req->npackets = npackets;
                req->response_size = response_size;
                req->response_type = TB_CFG_PKG_ICM_RESP;

                mutex_lock(&icm->request_lock);
                res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
                mutex_unlock(&icm->request_lock);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        return res.err == 1 ? -EIO : res.err;

                usleep_range(20, 50);
        } while (retries--);

        return -ETIMEDOUT;
}

/*
 * If a rescan is queued to run (we are resuming), postpone it to give the
 * firmware some more time to send device connected notifications for the
 * next devices in the chain.
 */
static void icm_postpone_rescan(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        if (delayed_work_pending(&icm->rescan_work))
                mod_delayed_work(tb->wq, &icm->rescan_work,
                                 msecs_to_jiffies(500));
}

static void icm_veto_begin(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        if (!icm->veto) {
                icm->veto = true;
                /* Keep the domain powered while veto is in effect */
                pm_runtime_get(&tb->dev);
        }
}

static void icm_veto_end(struct tb *tb)
{
        struct icm *icm = tb_priv(tb);

        if (icm->veto) {
                icm->veto = false;
                /* Allow the domain to suspend now */
                pm_runtime_mark_last_busy(&tb->dev);
                pm_runtime_put_autosuspend(&tb->dev);
        }
}

static bool icm_firmware_running(const struct tb_nhi *nhi)
{
        u32 val;

        val = ioread32(nhi->iobase + REG_FW_STS);
        return !!(val & REG_FW_STS_ICM_EN);
}

static bool icm_fr_is_supported(struct tb *tb)
{
        return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
        int index;

        if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
                return 0;

        index = port >> ICM_PORT_INDEX_SHIFT;
        return index != 0xff ? index : 0;
}

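/*
 * Build a route string for the switch at (link, depth) by fetching the
 * full topology from the ICM and walking it: start from the switch
 * behind the root's port for @link, then descend one level per
 * iteration by following the first port that points to a higher switch
 * index, and finally read the route string from the switch found.
 */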
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_fr_pkg_get_topology_response *switches, *sw;
        struct icm_fr_pkg_get_topology request = {
                .hdr = { .code = ICM_GET_TOPOLOGY },
        };
        size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
        int ret, index;
        u8 i;

        switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
        if (!switches)
                return -ENOMEM;

        ret = icm_request(tb, &request, sizeof(request), switches,
                          sizeof(*switches), npackets, ICM_TIMEOUT);
        if (ret)
                goto err_free;

        sw = &switches[0];
        index = icm_fr_get_switch_index(sw->ports[link]);
        if (!index) {
                ret = -ENODEV;
                goto err_free;
        }

        sw = &switches[index];
        for (i = 1; i < depth; i++) {
                unsigned int j;

                if (!(sw->first_data & ICM_SWITCH_USED)) {
                        ret = -ENODEV;
                        goto err_free;
                }

                for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
                        index = icm_fr_get_switch_index(sw->ports[j]);
                        if (index > sw->switch_index) {
                                sw = &switches[index];
                                break;
                        }
                }
        }

        *route = get_route(sw->route_hi, sw->route_lo);

err_free:
        kfree(switches);
        return ret;
}

static void icm_fr_save_devices(struct tb *tb)
{
        nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_fr_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

        return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_approve_device request;
        struct icm_fr_pkg_approve_device reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_APPROVE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;

        memset(&reply, 0, sizeof(reply));
        /* Use larger timeout as establishing tunnels can take some time */
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_APPROVE_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "PCIe tunnel creation failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct icm_fr_pkg_add_device_key request;
        struct icm_fr_pkg_add_device_key_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_ADD_DEVICE_KEY;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "Adding key to switch failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                                       const u8 *challenge, u8 *response)
{
        struct icm_fr_pkg_challenge_device request;
        struct icm_fr_pkg_challenge_device_response reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_CHALLENGE_DEVICE;
        request.connection_id = sw->connection_id;
        request.connection_key = sw->connection_key;
        memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EKEYREJECTED;
        if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
                return -ENOKEY;

        memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

        return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct icm_fr_pkg_approve_xdomain_response reply;
        struct icm_fr_pkg_approve_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_APPROVE_XDOMAIN;
        request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        request.transmit_path = xd->transmit_path;
        request.transmit_ring = xd->transmit_ring;
        request.receive_path = xd->receive_path;
        request.receive_ring = xd->receive_ring;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        u8 phy_port;
        u8 cmd;

        phy_port = tb_phy_port_from_link(xd->link);
        if (phy_port == 0)
                cmd = NHI_MAILBOX_DISCONNECT_PA;
        else
                cmd = NHI_MAILBOX_DISCONNECT_PB;

        nhi_mailbox_cmd(tb->nhi, cmd, 1);
        usleep_range(10, 50);
        nhi_mailbox_cmd(tb->nhi, cmd, 2);
        return 0;
}

static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
                                      const uuid_t *uuid)
{
        struct tb *tb = parent_sw->tb;
        struct tb_switch *sw;

        sw = tb_switch_alloc(tb, &parent_sw->dev, route);
        if (IS_ERR(sw)) {
                tb_warn(tb, "failed to allocate switch at %llx\n", route);
                return sw;
        }

        sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
        if (!sw->uuid) {
                tb_switch_put(sw);
                return ERR_PTR(-ENOMEM);
        }

        init_completion(&sw->rpm_complete);
        return sw;
}

static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
{
        u64 route = tb_route(sw);
        int ret;

        /* Link the two switches now */
        tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
        tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

        ret = tb_switch_add(sw);
        if (ret)
                tb_port_at(tb_route(sw), parent_sw)->remote = NULL;

        return ret;
}

static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
                          u64 route, u8 connection_id, u8 connection_key,
                          u8 link, u8 depth, bool boot)
{
        /* Disconnect from parent */
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
        /* Re-connect via updated port */
        tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

        /* Update with the new addressing information */
        sw->config.route_hi = upper_32_bits(route);
        sw->config.route_lo = lower_32_bits(route);
        sw->connection_id = connection_id;
        sw->connection_key = connection_key;
        sw->link = link;
        sw->depth = depth;
        sw->boot = boot;

        /* This switch still exists */
        sw->is_unplugged = false;

        /* Runtime resume is now complete */
        complete(&sw->rpm_complete);
}

static void remove_switch(struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        parent_sw = tb_to_switch(sw->dev.parent);
        tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
        tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
                        const uuid_t *local_uuid, const uuid_t *remote_uuid,
                        u8 link, u8 depth)
{
        struct tb_xdomain *xd;

        pm_runtime_get_sync(&sw->dev);

        xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
        if (!xd)
                goto out;

        xd->link = link;
        xd->depth = depth;

        tb_port_at(route, sw)->xdomain = xd;

        tb_xdomain_add(xd);

out:
        pm_runtime_mark_last_busy(&sw->dev);
        pm_runtime_put_autosuspend(&sw->dev);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
        xd->link = link;
        xd->route = route;
        xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        tb_port_at(xd->route, sw)->xdomain = NULL;
        tb_xdomain_remove(xd);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_connected *pkg =
                (const struct icm_fr_event_device_connected *)hdr;
        enum tb_security_level security_level;
        struct tb_switch *sw, *parent_sw;
        bool boot, dual_lane, speed_gen3;
        struct icm *icm = tb_priv(tb);
        bool authorized = false;
        struct tb_xdomain *xd;
        u8 link, depth;
        u64 route;
        int ret;

        icm_postpone_rescan(tb);

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
        security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
                         ICM_FLAGS_SLEVEL_SHIFT;
        boot = pkg->link_info & ICM_LINK_INFO_BOOT;
        dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
        speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;

        if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
                tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
                        link, depth);
                return;
        }

        sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
        if (sw) {
                u8 phy_port, sw_phy_port;

                parent_sw = tb_to_switch(sw->dev.parent);
                sw_phy_port = tb_phy_port_from_link(sw->link);
                phy_port = tb_phy_port_from_link(link);
                /*
                 * On resume ICM will send us connected events for the
                 * devices that still are present. However, that
                 * information might have changed, for example because a
                 * switch on a dual-link connection might now have been
                 * enumerated using the other link. Make sure our
                 * bookkeeping matches that.
                 */
                if (sw->depth == depth && sw_phy_port == phy_port &&
                    !!sw->authorized == authorized) {
                        /*
                         * It was enumerated through another link so update
                         * route string accordingly.
                         */
                        if (sw->link != link) {
                                ret = icm->get_route(tb, link, depth, &route);
                                if (ret) {
                                        tb_err(tb, "failed to update route string for switch at %u.%u\n",
                                               link, depth);
                                        tb_switch_put(sw);
                                        return;
                                }
                        } else {
                                route = tb_route(sw);
                        }

                        update_switch(parent_sw, sw, route, pkg->connection_id,
                                      pkg->connection_key, link, depth, boot);
                        tb_switch_put(sw);
                        return;
                }

                /*
                 * User connected the same switch to another physical
                 * port or to another part of the topology. Remove the
                 * existing switch now before adding the new one.
                 */
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /*
         * If the switch was not found by UUID, look for a switch on the
         * same physical port (taking possible link aggregation into
         * account) and depth. If we found one it is definitely a stale
         * one so remove it first.
         */
        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
        }
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* Remove existing XDomain connection if found */
        xd = tb_xdomain_find_by_link_depth(tb, link, depth);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
        if (!parent_sw) {
                tb_err(tb, "failed to find parent switch for %u.%u\n",
                       link, depth);
                return;
        }

        ret = icm->get_route(tb, link, depth, &route);
        if (ret) {
                tb_err(tb, "failed to find route string for switch at %u.%u\n",
                       link, depth);
                tb_switch_put(parent_sw);
                return;
        }

        pm_runtime_get_sync(&parent_sw->dev);

        sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
        if (!IS_ERR(sw)) {
                sw->connection_id = pkg->connection_id;
                sw->connection_key = pkg->connection_key;
                sw->link = link;
                sw->depth = depth;
                sw->authorized = authorized;
                sw->security_level = security_level;
                sw->boot = boot;
                sw->link_speed = speed_gen3 ? 20 : 10;
                sw->link_width = dual_lane ? 2 : 1;
                sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));

                if (add_switch(parent_sw, sw))
                        tb_switch_put(sw);
        }

        pm_runtime_mark_last_busy(&parent_sw->dev);
        pm_runtime_put_autosuspend(&parent_sw->dev);

        tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_device_disconnected *pkg =
                (const struct icm_fr_event_device_disconnected *)hdr;
        struct tb_switch *sw;
        u8 link, depth;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;

        if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_xdomain_connected *pkg =
                (const struct icm_fr_event_xdomain_connected *)hdr;
        struct tb_xdomain *xd;
        struct tb_switch *sw;
        u8 link, depth;
        u64 route;

        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;

        if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
                tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
                return;
        }

        route = get_route(pkg->local_route_hi, pkg->local_route_lo);

        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                u8 xd_phy_port, phy_port;

                xd_phy_port = phy_port_from_route(xd->route, xd->depth);
                phy_port = phy_port_from_route(route, depth);

                if (xd->depth == depth && xd_phy_port == phy_port) {
                        update_xdomain(xd, route, link);
                        tb_xdomain_put(xd);
                        return;
                }

                /*
                 * If we find an existing XDomain connection remove it
                 * now. We need to go through login handshake and
                 * everything anyway to be able to re-establish the
                 * connection.
                 */
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * Look if there already exists an XDomain in the same place
         * as the new one and in that case remove it because it is
         * most likely another host that got disconnected.
         */
        xd = tb_xdomain_find_by_link_depth(tb, link, depth);
        if (!xd) {
                u8 dual_link;

                dual_link = dual_link_from_link(link);
                if (dual_link)
                        xd = tb_xdomain_find_by_link_depth(tb, dual_link,
                                                           depth);
        }
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * If the user disconnected a switch during suspend and
         * connected another host to the same port, remove the switch
         * first.
         */
        sw = tb_switch_find_by_route(tb, route);
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        sw = tb_switch_find_by_link_depth(tb, link, depth);
        if (!sw) {
                tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
                        depth);
                return;
        }

        add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
                    depth);
        tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_fr_event_xdomain_disconnected *pkg =
                (const struct icm_fr_event_xdomain_disconnected *)hdr;
        struct tb_xdomain *xd;

        /*
         * If the connection is through one or multiple devices, the
         * XDomain device is removed along with them so it is fine if we
         * cannot find it here.
         */
        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }
}

static int icm_tr_cio_reset(struct tb *tb)
{
        return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
}

static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_tr_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, 20000);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
        if (nboot_acl)
                *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
                                ICM_TR_INFO_BOOT_ACL_SHIFT;
        if (rpm)
                *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

        return 0;
}

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct icm_tr_pkg_approve_device request;
        struct icm_tr_pkg_approve_device reply;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_APPROVE_DEVICE;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_APPROVE_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "PCIe tunnel creation failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct icm_tr_pkg_add_device_key_response reply;
        struct icm_tr_pkg_add_device_key request;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_ADD_DEVICE_KEY;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;
        memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR) {
                tb_warn(tb, "Adding key to switch failed\n");
                return -EIO;
        }

        return 0;
}

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
                                       const u8 *challenge, u8 *response)
{
        struct icm_tr_pkg_challenge_device_response reply;
        struct icm_tr_pkg_challenge_device request;
        int ret;

        memset(&request, 0, sizeof(request));
        memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
        request.hdr.code = ICM_CHALLENGE_DEVICE;
        request.route_lo = sw->config.route_lo;
        request.route_hi = sw->config.route_hi;
        request.connection_id = sw->connection_id;
        memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EKEYREJECTED;
        if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
                return -ENOKEY;

        memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

        return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct icm_tr_pkg_approve_xdomain_response reply;
        struct icm_tr_pkg_approve_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_APPROVE_XDOMAIN;
        request.route_hi = upper_32_bits(xd->route);
        request.route_lo = lower_32_bits(xd->route);
        request.transmit_path = xd->transmit_path;
        request.transmit_ring = xd->transmit_ring;
        request.receive_path = xd->receive_path;
        request.receive_ring = xd->receive_ring;
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
                                    int stage)
{
        struct icm_tr_pkg_disconnect_xdomain_response reply;
        struct icm_tr_pkg_disconnect_xdomain request;
        int ret;

        memset(&request, 0, sizeof(request));
        request.hdr.code = ICM_DISCONNECT_XDOMAIN;
        request.stage = stage;
        request.route_hi = upper_32_bits(xd->route);
        request.route_lo = lower_32_bits(xd->route);
        memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        return 0;
}

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        int ret;

        ret = icm_tr_xdomain_tear_down(tb, xd, 1);
        if (ret)
                return ret;

        usleep_range(10, 50);
        return icm_tr_xdomain_tear_down(tb, xd, 2);
}

static void
__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
                          bool force_rtd3)
{
        const struct icm_tr_event_device_connected *pkg =
                (const struct icm_tr_event_device_connected *)hdr;
        bool authorized, boot, dual_lane, speed_gen3;
        enum tb_security_level security_level;
        struct tb_switch *sw, *parent_sw;
        struct tb_xdomain *xd;
        u64 route;

        icm_postpone_rescan(tb);

        /*
         * Currently we don't use the QoS information coming with the
         * device connected message so just ignore that extra packet
         * for now.
         */
        if (pkg->hdr.packet_id)
                return;

        route = get_route(pkg->route_hi, pkg->route_lo);
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
        security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
                         ICM_FLAGS_SLEVEL_SHIFT;
        boot = pkg->link_info & ICM_LINK_INFO_BOOT;
        dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
        speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;

        if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
                tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
                        route);
                return;
        }

        sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
        if (sw) {
                /* Update the switch if it is still in the same place */
                if (tb_route(sw) == route && !!sw->authorized == authorized) {
                        parent_sw = tb_to_switch(sw->dev.parent);
                        update_switch(parent_sw, sw, route, pkg->connection_id,
                                      0, 0, 0, boot);
                        tb_switch_put(sw);
                        return;
                }

                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* Another switch with the same address */
        sw = tb_switch_find_by_route(tb, route);
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        /* XDomain connection with the same address */
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
        if (!parent_sw) {
                tb_err(tb, "failed to find parent switch for %llx\n", route);
                return;
        }

        pm_runtime_get_sync(&parent_sw->dev);

        sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
        if (!IS_ERR(sw)) {
                sw->connection_id = pkg->connection_id;
                sw->authorized = authorized;
                sw->security_level = security_level;
                sw->boot = boot;
                sw->link_speed = speed_gen3 ? 20 : 10;
                sw->link_width = dual_lane ? 2 : 1;
                sw->rpm = force_rtd3;
                if (!sw->rpm)
                        sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
                                                    sizeof(pkg->ep_name));

                if (add_switch(parent_sw, sw))
                        tb_switch_put(sw);
        }

        pm_runtime_mark_last_busy(&parent_sw->dev);
        pm_runtime_put_autosuspend(&parent_sw->dev);

        tb_switch_put(parent_sw);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        __icm_tr_device_connected(tb, hdr, false);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_device_disconnected *pkg =
                (const struct icm_tr_event_device_disconnected *)hdr;
        struct tb_switch *sw;
        u64 route;

        route = get_route(pkg->route_hi, pkg->route_lo);

        sw = tb_switch_find_by_route(tb, route);
        if (!sw) {
                tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
                return;
        }

        remove_switch(sw);
        tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_xdomain_connected *pkg =
                (const struct icm_tr_event_xdomain_connected *)hdr;
        struct tb_xdomain *xd;
        struct tb_switch *sw;
        u64 route;

        if (!tb->root_switch)
                return;

        route = get_route(pkg->local_route_hi, pkg->local_route_lo);

        xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
        if (xd) {
                if (xd->route == route) {
                        update_xdomain(xd, route, 0);
                        tb_xdomain_put(xd);
                        return;
                }

                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /* An existing xdomain with the same address */
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }

        /*
         * If the user disconnected a switch during suspend and
         * connected another host to the same port, remove the switch
         * first.
         */
        sw = tb_switch_find_by_route(tb, route);
        if (sw) {
                remove_switch(sw);
                tb_switch_put(sw);
        }

        sw = tb_switch_find_by_route(tb, get_parent_route(route));
        if (!sw) {
                tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
                return;
        }

        add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
        tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
        const struct icm_tr_event_xdomain_disconnected *pkg =
                (const struct icm_tr_event_xdomain_disconnected *)hdr;
        struct tb_xdomain *xd;
        u64 route;

        route = get_route(pkg->route_hi, pkg->route_lo);

        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                remove_xdomain(xd);
                tb_xdomain_put(xd);
        }
}

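/*
 * Walk up the PCIe hierarchy from the NHI until we hit an upstream
 * port, and return it only if it is one of the known Alpine Ridge or
 * Titan Ridge bridges. This is the port whose vendor specific
 * registers are later used for the PCIe2CIO mailbox.
 */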
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
        struct pci_dev *parent;

        parent = pci_upstream_bridge(pdev);
        while (parent) {
                if (!pci_is_pcie(parent))
                        return NULL;
                if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
                        break;
                parent = pci_upstream_bridge(parent);
        }

        if (!parent)
                return NULL;

        switch (parent->device) {
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
        case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
                return parent;
        }

        return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
        struct pci_dev *upstream_port;
        struct icm *icm = tb_priv(tb);

        /*
         * Starting from Alpine Ridge we can use ICM on Apple machines
         * as well. We just need to reset and re-enable it first.
         * However, only start it if explicitly asked by the user.
         */
        if (icm_firmware_running(tb->nhi))
                return true;
        if (!start_icm)
                return false;

        /*
         * Find the upstream PCIe port in case we need to reset the
         * controller through its vendor specific registers.
         */
        upstream_port = get_upstream_port(tb->nhi->pdev);
        if (upstream_port) {
                int cap;

                cap = pci_find_ext_capability(upstream_port,
                                              PCI_EXT_CAP_ID_VNDR);
                if (cap > 0) {
                        icm->upstream_port = upstream_port;
                        icm->vnd_cap = cap;

                        return true;
                }
        }

        return false;
}

static int icm_ar_cio_reset(struct tb *tb)
{
        return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

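/*
 * Wait up to ~3 seconds (60 x 50 ms) for the firmware to finish NVM
 * authentication before asking the mailbox which mode the ICM is
 * operating in.
 */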
static int icm_ar_get_mode(struct tb *tb)
{
        struct tb_nhi *nhi = tb->nhi;
        int retries = 60;
        u32 val;

        do {
                val = ioread32(nhi->iobase + REG_FW_STS);
                if (val & REG_FW_STS_NVM_AUTH_DONE)
                        break;
                msleep(50);
        } while (--retries);

        if (!retries) {
                dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
                return -ENODEV;
        }

        return nhi_mailbox_mode(nhi);
}

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
                    size_t *nboot_acl, bool *rpm)
{
        struct icm_ar_pkg_driver_ready_response reply;
        struct icm_pkg_driver_ready request = {
                .hdr.code = ICM_DRIVER_READY,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (security_level)
                *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
        if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
                *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
                                ICM_AR_INFO_BOOT_ACL_SHIFT;
        if (rpm)
                *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

        return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
        struct icm_ar_pkg_get_route_response reply;
        struct icm_ar_pkg_get_route request = {
                .hdr = { .code = ICM_GET_ROUTE },
                .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
        };
        int ret;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        *route = get_route(reply.route_hi, reply.route_lo);
        return 0;
}

static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
        struct icm_ar_pkg_preboot_acl_response reply;
        struct icm_ar_pkg_preboot_acl request = {
                .hdr = { .code = ICM_PREBOOT_ACL },
        };
        int ret, i;

        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
                          1, ICM_TIMEOUT);
        if (ret)
                return ret;

        if (reply.hdr.flags & ICM_FLAGS_ERROR)
                return -EIO;

        for (i = 0; i < nuuids; i++) {
                u32 *uuid = (u32 *)&uuids[i];

                uuid[0] = reply.acl[i].uuid_lo;
                uuid[1] = reply.acl[i].uuid_hi;

                if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
                        /* Map empty entries to null UUID */
                        uuid[0] = 0;
                        uuid[1] = 0;
                } else if (uuid[0] != 0 || uuid[1] != 0) {
                        /* Upper two DWs are always ones */
                        uuid[2] = 0xffffffff;
                        uuid[3] = 0xffffffff;
                }
        }

        return ret;
}

static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
                               size_t nuuids)
{
        struct icm_ar_pkg_preboot_acl_response reply;
        struct icm_ar_pkg_preboot_acl request = {
                .hdr = {
                        .code = ICM_PREBOOT_ACL,
                        .flags = ICM_FLAGS_WRITE,
                },
        };
        int ret, i;

        for (i = 0; i < nuuids; i++) {
                const u32 *uuid = (const u32 *)&uuids[i];

                if (uuid_is_null(&uuids[i])) {
1553                         /*
1554                          * Map null UUID to the empty (all ones) entries
1555                          * for ICM.
1556                          */
1557                         request.acl[i].uuid_lo = 0xffffffff;
1558                         request.acl[i].uuid_hi = 0xffffffff;
1559                 } else {
1560                         /* Two high DWs need to be set to all ones */
1561                         if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
1562                                 return -EINVAL;
1563
1564                         request.acl[i].uuid_lo = uuid[0];
1565                         request.acl[i].uuid_hi = uuid[1];
1566                 }
1567         }
1568
1569         memset(&reply, 0, sizeof(reply));
1570         ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1571                           1, ICM_TIMEOUT);
1572         if (ret)
1573                 return ret;
1574
1575         if (reply.hdr.flags & ICM_FLAGS_ERROR)
1576                 return -EIO;
1577
1578         return 0;
1579 }
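
     /*
      * Minimal usage sketch, assuming the caller is the sysfs boot_acl
      * path in domain.c (as in mainline), which goes through the
      * connection manager ops roughly like this:
      *
      *   uuid_t acl[ICM_AR_PREBOOT_ACL_ENTRIES] = { };
      *
      *   ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
      */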
1580
1581 static int
1582 icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1583                     size_t *nboot_acl, bool *rpm)
1584 {
1585         struct icm_tr_pkg_driver_ready_response reply;
1586         struct icm_pkg_driver_ready request = {
1587                 .hdr.code = ICM_DRIVER_READY,
1588         };
1589         int ret;
1590
1591         memset(&reply, 0, sizeof(reply));
1592         ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1593                           1, 20000 /* ms */);
1594         if (ret)
1595                 return ret;
1596
1597         /* Ice Lake always supports RTD3 */
1598         if (rpm)
1599                 *rpm = true;
1600
1601         return 0;
1602 }
1603
1604 static void icm_icl_set_uuid(struct tb *tb)
1605 {
1606         struct tb_nhi *nhi = tb->nhi;
1607         u32 uuid[4];
1608
1609         pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
1610         pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
1611         uuid[2] = 0xffffffff;
1612         uuid[3] = 0xffffffff;
1613
1614         tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
1615 }
1616
1617 static void
1618 icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1619 {
1620         __icm_tr_device_connected(tb, hdr, true);
1621 }
1622
1623 static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
1624 {
1625         const struct icm_icl_event_rtd3_veto *pkg =
1626                 (const struct icm_icl_event_rtd3_veto *)hdr;
1627
1628         tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);
1629
1630         if (pkg->veto_reason)
1631                 icm_veto_begin(tb);
1632         else
1633                 icm_veto_end(tb);
1634 }
1635
1636 static bool icm_tgl_is_supported(struct tb *tb)
1637 {
1638         /*
1639          * If the firmware is not running, use the software CM. This
1640          * platform fully supports both.
1641          */
1642         return icm_firmware_running(tb->nhi);
1643 }
1644
1645 static void icm_handle_notification(struct work_struct *work)
1646 {
1647         struct icm_notification *n = container_of(work, typeof(*n), work);
1648         struct tb *tb = n->tb;
1649         struct icm *icm = tb_priv(tb);
1650
1651         mutex_lock(&tb->lock);
1652
1653         /*
1654          * When the domain is stopped its workqueue is flushed, but the
1655          * root switch is removed before that happens. In that case treat
1656          * the queued events as canceled.
1657          */
1658         if (tb->root_switch) {
1659                 switch (n->pkg->code) {
1660                 case ICM_EVENT_DEVICE_CONNECTED:
1661                         icm->device_connected(tb, n->pkg);
1662                         break;
1663                 case ICM_EVENT_DEVICE_DISCONNECTED:
1664                         icm->device_disconnected(tb, n->pkg);
1665                         break;
1666                 case ICM_EVENT_XDOMAIN_CONNECTED:
1667                         icm->xdomain_connected(tb, n->pkg);
1668                         break;
1669                 case ICM_EVENT_XDOMAIN_DISCONNECTED:
1670                         icm->xdomain_disconnected(tb, n->pkg);
1671                         break;
1672                 case ICM_EVENT_RTD3_VETO:
1673                         icm->rtd3_veto(tb, n->pkg);
1674                         break;
1675                 }
1676         }
1677
1678         mutex_unlock(&tb->lock);
1679
1680         kfree(n->pkg);
1681         kfree(n);
1682 }
1683
1684 static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1685                              const void *buf, size_t size)
1686 {
1687         struct icm_notification *n;
1688
1689         n = kmalloc(sizeof(*n), GFP_KERNEL);
1690         if (!n)
1691                 return;
1692
1693         INIT_WORK(&n->work, icm_handle_notification);
1694         n->pkg = kmemdup(buf, size, GFP_KERNEL);
             if (!n->pkg) {
                     kfree(n);
                     return;
             }
1695         n->tb = tb;
1696
1697         queue_work(tb->wq, &n->work);
1698 }
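
     /*
      * Note on the pattern above: icm_handle_event() is called from the
      * control channel receive path, so the packet is copied with
      * kmemdup() and handling is deferred to process context (tb->wq)
      * where icm_handle_notification() can safely take tb->lock.
      */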
1699
1700 static int
1701 __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1702                    size_t *nboot_acl, bool *rpm)
1703 {
1704         struct icm *icm = tb_priv(tb);
1705         unsigned int retries = 50;
1706         int ret;
1707
1708         ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
1709         if (ret) {
1710                 tb_err(tb, "failed to send driver ready to ICM\n");
1711                 return ret;
1712         }
1713
1714         /*
1715          * Wait here until the switch config space is accessible so that
1716          * we can read the root switch config successfully.
1717          */
1718         do {
1719                 struct tb_cfg_result res;
1720                 u32 tmp;
1721
1722                 res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
1723                                       0, 1, 100);
1724                 if (!res.err)
1725                         return 0;
1726
1727                 msleep(50);
1728         } while (--retries);
1729
1730         tb_err(tb, "failed to read root switch config space, giving up\n");
1731         return -ETIMEDOUT;
1732 }
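
     /*
      * Worst case the loop above waits roughly 50 * (100 ms read
      * timeout + 50 ms sleep) = ~7.5 s for the root switch config space
      * to become accessible before giving up.
      */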
1733
1734 static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
1735 {
1736         struct icm *icm = tb_priv(tb);
1737         u32 val;
1738
1739         if (!icm->upstream_port)
1740                 return -ENODEV;
1741
1742         /* Make the ARC wait for the CIO reset event to happen */
1743         val = ioread32(nhi->iobase + REG_FW_STS);
1744         val |= REG_FW_STS_CIO_RESET_REQ;
1745         iowrite32(val, nhi->iobase + REG_FW_STS);
1746
1747         /* Re-start ARC */
1748         val = ioread32(nhi->iobase + REG_FW_STS);
1749         val |= REG_FW_STS_ICM_EN_INVERT;
1750         val |= REG_FW_STS_ICM_EN_CPU;
1751         iowrite32(val, nhi->iobase + REG_FW_STS);
1752
1753         /* Trigger CIO reset now */
1754         return icm->cio_reset(tb);
1755 }
1756
1757 static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
1758 {
1759         unsigned int retries = 10;
1760         int ret;
1761         u32 val;
1762
1763         /* Check if the ICM firmware is already running */
1764         if (icm_firmware_running(nhi))
1765                 return 0;
1766
1767         dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
1768
1769         ret = icm_firmware_reset(tb, nhi);
1770         if (ret)
1771                 return ret;
1772
1773         /* Wait until the ICM firmware tells us it is up and running */
1774         do {
1775                 /* Check that the ICM firmware is running */
1776                 val = ioread32(nhi->iobase + REG_FW_STS);
1777                 if (val & REG_FW_STS_NVM_AUTH_DONE)
1778                         return 0;
1779
1780                 msleep(300);
1781         } while (--retries);
1782
1783         return -ETIMEDOUT;
1784 }
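
     /*
      * With 10 retries and a 300 ms sleep per attempt, the loop above
      * gives the firmware roughly 3 s to set REG_FW_STS_NVM_AUTH_DONE
      * before timing out.
      */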
1785
1786 static int icm_reset_phy_port(struct tb *tb, int phy_port)
1787 {
1788         struct icm *icm = tb_priv(tb);
1789         u32 state0, state1;
1790         int port0, port1;
1791         u32 val0, val1;
1792         int ret;
1793
1794         if (!icm->upstream_port)
1795                 return 0;
1796
1797         if (phy_port) {
1798                 port0 = 3;
1799                 port1 = 4;
1800         } else {
1801                 port0 = 1;
1802                 port1 = 2;
1803         }
1804
1805         /*
1806          * Read link status of both null ports belonging to a single
1807          * physical port.
1808          */
1809         ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1810         if (ret)
1811                 return ret;
1812         ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1813         if (ret)
1814                 return ret;
1815
1816         state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
1817         state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1818         state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
1819         state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1820
1821         /* If they are both up, we need to reset them now */
1822         if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
1823                 return 0;
1824
1825         val0 |= PHY_PORT_CS1_LINK_DISABLE;
1826         ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1827         if (ret)
1828                 return ret;
1829
1830         val1 |= PHY_PORT_CS1_LINK_DISABLE;
1831         ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1832         if (ret)
1833                 return ret;
1834
1835         /* Wait a bit and then re-enable both ports */
1836         usleep_range(10, 100);
1837
1838         ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1839         if (ret)
1840                 return ret;
1841         ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1842         if (ret)
1843                 return ret;
1844
1845         val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
1846         ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1847         if (ret)
1848                 return ret;
1849
1850         val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
1851         return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1852 }
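
     /*
      * Summary of the sequence above: read the link state of both lanes
      * (null ports 1/2 for phy_port 0, 3/4 for phy_port 1) and, only if
      * both are up, disable the links, wait briefly and re-enable them,
      * presumably so the firmware re-enumerates devices that were
      * connected before the driver loaded.
      */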
1853
1854 static int icm_firmware_init(struct tb *tb)
1855 {
1856         struct icm *icm = tb_priv(tb);
1857         struct tb_nhi *nhi = tb->nhi;
1858         int ret;
1859
1860         ret = icm_firmware_start(tb, nhi);
1861         if (ret) {
1862                 dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
1863                 return ret;
1864         }
1865
1866         if (icm->get_mode) {
1867                 ret = icm->get_mode(tb);
1868
1869                 switch (ret) {
1870                 case NHI_FW_SAFE_MODE:
1871                         icm->safe_mode = true;
1872                         break;
1873
1874                 case NHI_FW_CM_MODE:
1875                         /* Ask ICM to accept all Thunderbolt devices */
1876                         nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
1877                         break;
1878
1879                 default:
1880                         if (ret < 0)
1881                                 return ret;
1882
1883                         tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
1884                         return -ENODEV;
1885                 }
1886         }
1887
1888         /*
1889          * Reset both physical ports if there is anything connected to
1890          * them already.
1891          */
1892         ret = icm_reset_phy_port(tb, 0);
1893         if (ret)
1894                 dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
1895         ret = icm_reset_phy_port(tb, 1);
1896         if (ret)
1897                 dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
1898
1899         return 0;
1900 }
1901
1902 static int icm_driver_ready(struct tb *tb)
1903 {
1904         struct icm *icm = tb_priv(tb);
1905         int ret;
1906
1907         ret = icm_firmware_init(tb);
1908         if (ret)
1909                 return ret;
1910
1911         if (icm->safe_mode) {
1912                 tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
1913                 tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
1914                 tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
1915                 return 0;
1916         }
1917
1918         ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
1919                                  &icm->rpm);
1920         if (ret)
1921                 return ret;
1922
1923         /*
1924          * Make sure the number of supported preboot ACL entries matches
1925          * what we expect, or disable the whole feature.
1926          */
1927         if (tb->nboot_acl > icm->max_boot_acl)
1928                 tb->nboot_acl = 0;
1929
1930         return 0;
1931 }
1932
1933 static int icm_suspend(struct tb *tb)
1934 {
1935         struct icm *icm = tb_priv(tb);
1936
1937         if (icm->save_devices)
1938                 icm->save_devices(tb);
1939
1940         nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1941         return 0;
1942 }
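
     /*
      * NHI_MAILBOX_DRV_UNLOADS signals the firmware that the driver is
      * going away (used here for system suspend); the matching driver
      * ready message is sent again on resume from icm_complete().
      */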
1943
1944 /*
1945  * Mark all switches (except root switch) below this one unplugged. ICM
1946  * firmware will send us an updated list of switches after we have sent
1947  * it the driver ready command. If a switch is not in that list it will
1948  * be removed when we perform the rescan.
1949  */
1950 static void icm_unplug_children(struct tb_switch *sw)
1951 {
1952         struct tb_port *port;
1953
1954         if (tb_route(sw))
1955                 sw->is_unplugged = true;
1956
1957         tb_switch_for_each_port(sw, port) {
1958                 if (port->xdomain)
1959                         port->xdomain->is_unplugged = true;
1960                 else if (tb_port_has_remote(port))
1961                         icm_unplug_children(port->remote->sw);
1962         }
1963 }
1964
1965 static int complete_rpm(struct device *dev, void *data)
1966 {
1967         struct tb_switch *sw = tb_to_switch(dev);
1968
1969         if (sw)
1970                 complete(&sw->rpm_complete);
1971         return 0;
1972 }
1973
1974 static void remove_unplugged_switch(struct tb_switch *sw)
1975 {
1976         pm_runtime_get_sync(sw->dev.parent);
1977
1978         /*
1979          * Signal rpm_complete for this switch and the switches below it
1980          * because tb_switch_remove() calls pm_runtime_get_sync() which
1981          * then waits for it.
1982          */
1983         complete_rpm(&sw->dev, NULL);
1984         bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
1985         tb_switch_remove(sw);
1986
1987         pm_runtime_mark_last_busy(sw->dev.parent);
1988         pm_runtime_put_autosuspend(sw->dev.parent);
1989 }
1990
1991 static void icm_free_unplugged_children(struct tb_switch *sw)
1992 {
1993         struct tb_port *port;
1994
1995         tb_switch_for_each_port(sw, port) {
1996                 if (port->xdomain && port->xdomain->is_unplugged) {
1997                         tb_xdomain_remove(port->xdomain);
1998                         port->xdomain = NULL;
1999                 } else if (tb_port_has_remote(port)) {
2000                         if (port->remote->sw->is_unplugged) {
2001                                 remove_unplugged_switch(port->remote->sw);
2002                                 port->remote = NULL;
2003                         } else {
2004                                 icm_free_unplugged_children(port->remote->sw);
2005                         }
2006                 }
2007         }
2008 }
2009
2010 static void icm_rescan_work(struct work_struct *work)
2011 {
2012         struct icm *icm = container_of(work, struct icm, rescan_work.work);
2013         struct tb *tb = icm_to_tb(icm);
2014
2015         mutex_lock(&tb->lock);
2016         if (tb->root_switch)
2017                 icm_free_unplugged_children(tb->root_switch);
2018         mutex_unlock(&tb->lock);
2019 }
2020
2021 static void icm_complete(struct tb *tb)
2022 {
2023         struct icm *icm = tb_priv(tb);
2024
2025         if (tb->nhi->going_away)
2026                 return;
2027
2028         /*
2029          * If RTD3 was vetoed before we entered system suspend, allow it
2030          * again now before the driver ready message is sent. Firmware
2031          * sends a new RTD3 veto if it is still needed after we have sent
2032          * it the driver ready command.
2033          */
2034         icm_veto_end(tb);
2035         icm_unplug_children(tb->root_switch);
2036
2037         /*
2038          * Now that all existing children should be resumed, start events
2039          * from ICM to get updated status.
2040          */
2041         __icm_driver_ready(tb, NULL, NULL, NULL);
2042
2043         /*
2044          * We do not get notifications of devices that have been
2045          * unplugged during suspend, so schedule a rescan to clean them
2046          * up, if any.
2047          */
2048         queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
2049 }
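
     /*
      * Resume flow in short: icm_unplug_children() pessimistically
      * marks everything below the root switch unplugged, the driver
      * ready message restarts event delivery so still-present devices
      * get announced again, and the delayed rescan_work removes
      * whatever was not re-announced.
      */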
2050
2051 static int icm_runtime_suspend(struct tb *tb)
2052 {
2053         nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
2054         return 0;
2055 }
2056
2057 static int icm_runtime_suspend_switch(struct tb_switch *sw)
2058 {
2059         if (tb_route(sw))
2060                 reinit_completion(&sw->rpm_complete);
2061         return 0;
2062 }
2063
2064 static int icm_runtime_resume_switch(struct tb_switch *sw)
2065 {
2066         if (tb_route(sw)) {
2067                 if (!wait_for_completion_timeout(&sw->rpm_complete,
2068                                                  msecs_to_jiffies(500))) {
2069                         dev_dbg(&sw->dev, "runtime resume timed out\n");
2070                 }
2071         }
2072         return 0;
2073 }
2074
2075 static int icm_runtime_resume(struct tb *tb)
2076 {
2077         /*
2078          * We can reuse the same resume functionality as with system
2079          * suspend.
2080          */
2081         icm_complete(tb);
2082         return 0;
2083 }
2084
2085 static int icm_start(struct tb *tb)
2086 {
2087         struct icm *icm = tb_priv(tb);
2088         int ret;
2089
2090         if (icm->safe_mode)
2091                 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
2092         else
2093                 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2094         if (IS_ERR(tb->root_switch))
2095                 return PTR_ERR(tb->root_switch);
2096
2097         tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
2098         tb->root_switch->rpm = icm->rpm;
2099
2100         if (icm->set_uuid)
2101                 icm->set_uuid(tb);
2102
2103         ret = tb_switch_add(tb->root_switch);
2104         if (ret) {
2105                 tb_switch_put(tb->root_switch);
2106                 tb->root_switch = NULL;
2107         }
2108
2109         return ret;
2110 }
2111
2112 static void icm_stop(struct tb *tb)
2113 {
2114         struct icm *icm = tb_priv(tb);
2115
2116         cancel_delayed_work(&icm->rescan_work);
2117         tb_switch_remove(tb->root_switch);
2118         tb->root_switch = NULL;
2119         nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
2120 }
2121
2122 static int icm_disconnect_pcie_paths(struct tb *tb)
2123 {
2124         return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
2125 }
2126
2127 /* Falcon Ridge */
2128 static const struct tb_cm_ops icm_fr_ops = {
2129         .driver_ready = icm_driver_ready,
2130         .start = icm_start,
2131         .stop = icm_stop,
2132         .suspend = icm_suspend,
2133         .complete = icm_complete,
2134         .handle_event = icm_handle_event,
2135         .approve_switch = icm_fr_approve_switch,
2136         .add_switch_key = icm_fr_add_switch_key,
2137         .challenge_switch_key = icm_fr_challenge_switch_key,
2138         .disconnect_pcie_paths = icm_disconnect_pcie_paths,
2139         .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
2140         .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
2141 };
2142
2143 /* Alpine Ridge */
2144 static const struct tb_cm_ops icm_ar_ops = {
2145         .driver_ready = icm_driver_ready,
2146         .start = icm_start,
2147         .stop = icm_stop,
2148         .suspend = icm_suspend,
2149         .complete = icm_complete,
2150         .runtime_suspend = icm_runtime_suspend,
2151         .runtime_resume = icm_runtime_resume,
2152         .runtime_suspend_switch = icm_runtime_suspend_switch,
2153         .runtime_resume_switch = icm_runtime_resume_switch,
2154         .handle_event = icm_handle_event,
2155         .get_boot_acl = icm_ar_get_boot_acl,
2156         .set_boot_acl = icm_ar_set_boot_acl,
2157         .approve_switch = icm_fr_approve_switch,
2158         .add_switch_key = icm_fr_add_switch_key,
2159         .challenge_switch_key = icm_fr_challenge_switch_key,
2160         .disconnect_pcie_paths = icm_disconnect_pcie_paths,
2161         .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
2162         .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
2163 };
2164
2165 /* Titan Ridge */
2166 static const struct tb_cm_ops icm_tr_ops = {
2167         .driver_ready = icm_driver_ready,
2168         .start = icm_start,
2169         .stop = icm_stop,
2170         .suspend = icm_suspend,
2171         .complete = icm_complete,
2172         .runtime_suspend = icm_runtime_suspend,
2173         .runtime_resume = icm_runtime_resume,
2174         .runtime_suspend_switch = icm_runtime_suspend_switch,
2175         .runtime_resume_switch = icm_runtime_resume_switch,
2176         .handle_event = icm_handle_event,
2177         .get_boot_acl = icm_ar_get_boot_acl,
2178         .set_boot_acl = icm_ar_set_boot_acl,
2179         .approve_switch = icm_tr_approve_switch,
2180         .add_switch_key = icm_tr_add_switch_key,
2181         .challenge_switch_key = icm_tr_challenge_switch_key,
2182         .disconnect_pcie_paths = icm_disconnect_pcie_paths,
2183         .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
2184         .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2185 };
2186
2187 /* Ice Lake */
2188 static const struct tb_cm_ops icm_icl_ops = {
2189         .driver_ready = icm_driver_ready,
2190         .start = icm_start,
2191         .stop = icm_stop,
2192         .complete = icm_complete,
2193         .runtime_suspend = icm_runtime_suspend,
2194         .runtime_resume = icm_runtime_resume,
2195         .handle_event = icm_handle_event,
2196         .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
2197         .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2198 };
2199
2200 struct tb *icm_probe(struct tb_nhi *nhi)
2201 {
2202         struct icm *icm;
2203         struct tb *tb;
2204
2205         tb = tb_domain_alloc(nhi, sizeof(struct icm));
2206         if (!tb)
2207                 return NULL;
2208
2209         icm = tb_priv(tb);
2210         INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
2211         mutex_init(&icm->request_lock);
2212
2213         switch (nhi->pdev->device) {
2214         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2215         case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2216                 icm->can_upgrade_nvm = true;
2217                 icm->is_supported = icm_fr_is_supported;
2218                 icm->get_route = icm_fr_get_route;
2219                 icm->save_devices = icm_fr_save_devices;
2220                 icm->driver_ready = icm_fr_driver_ready;
2221                 icm->device_connected = icm_fr_device_connected;
2222                 icm->device_disconnected = icm_fr_device_disconnected;
2223                 icm->xdomain_connected = icm_fr_xdomain_connected;
2224                 icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2225                 tb->cm_ops = &icm_fr_ops;
2226                 break;
2227
2228         case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
2229         case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
2230         case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
2231         case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
2232         case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
2233                 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2234                 /*
2235                  * NVM upgrade has not been tested on Apple systems and
2236                  * they don't provide images publicly either. To be on
2237                  * the safe side, prevent root switch NVM upgrade on Macs
2238                  * for now.
2239                  */
2240                 icm->can_upgrade_nvm = !x86_apple_machine;
2241                 icm->is_supported = icm_ar_is_supported;
2242                 icm->cio_reset = icm_ar_cio_reset;
2243                 icm->get_mode = icm_ar_get_mode;
2244                 icm->get_route = icm_ar_get_route;
2245                 icm->save_devices = icm_fr_save_devices;
2246                 icm->driver_ready = icm_ar_driver_ready;
2247                 icm->device_connected = icm_fr_device_connected;
2248                 icm->device_disconnected = icm_fr_device_disconnected;
2249                 icm->xdomain_connected = icm_fr_xdomain_connected;
2250                 icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2251                 tb->cm_ops = &icm_ar_ops;
2252                 break;
2253
2254         case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
2255         case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
2256                 icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2257                 icm->can_upgrade_nvm = !x86_apple_machine;
2258                 icm->is_supported = icm_ar_is_supported;
2259                 icm->cio_reset = icm_tr_cio_reset;
2260                 icm->get_mode = icm_ar_get_mode;
2261                 icm->driver_ready = icm_tr_driver_ready;
2262                 icm->device_connected = icm_tr_device_connected;
2263                 icm->device_disconnected = icm_tr_device_disconnected;
2264                 icm->xdomain_connected = icm_tr_xdomain_connected;
2265                 icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2266                 tb->cm_ops = &icm_tr_ops;
2267                 break;
2268
2269         case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2270         case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2271                 icm->is_supported = icm_fr_is_supported;
2272                 icm->driver_ready = icm_icl_driver_ready;
2273                 icm->set_uuid = icm_icl_set_uuid;
2274                 icm->device_connected = icm_icl_device_connected;
2275                 icm->device_disconnected = icm_tr_device_disconnected;
2276                 icm->xdomain_connected = icm_tr_xdomain_connected;
2277                 icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2278                 icm->rtd3_veto = icm_icl_rtd3_veto;
2279                 tb->cm_ops = &icm_icl_ops;
2280                 break;
2281
2282         case PCI_DEVICE_ID_INTEL_TGL_NHI0:
2283         case PCI_DEVICE_ID_INTEL_TGL_NHI1:
2284                 icm->is_supported = icm_tgl_is_supported;
2285                 icm->driver_ready = icm_icl_driver_ready;
2286                 icm->set_uuid = icm_icl_set_uuid;
2287                 icm->device_connected = icm_icl_device_connected;
2288                 icm->device_disconnected = icm_tr_device_disconnected;
2289                 icm->xdomain_connected = icm_tr_xdomain_connected;
2290                 icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2291                 icm->rtd3_veto = icm_icl_rtd3_veto;
2292                 tb->cm_ops = &icm_icl_ops;
2293                 break;
2294         }
2295
2296         if (!icm->is_supported || !icm->is_supported(tb)) {
2297                 dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
2298                 tb_domain_put(tb);
2299                 return NULL;
2300         }
2301
2302         return tb;
2303 }
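
     /*
      * icm_probe() is called from the NHI driver during controller
      * probe (see nhi.c); if it returns NULL the NHI falls back to the
      * software connection manager via tb_probe().
      */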