// SPDX-License-Identifier: GPL-2.0
/*
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"
#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38
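/*
 * Editor's note (not in the original source): these registers form the
 * PCIe2CIO mailbox that lives in a vendor specific capability of the
 * PCIe upstream bridge (icm->vnd_cap). A command word combines the
 * target CIO port and config space with the register index and the
 * PCIE2CIO_CMD_START bit, roughly:
 *
 *	cmd = index;
 *	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
 *	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
 *	cmd |= PCIE2CIO_CMD_START;
 *
 * Completion is polled by waiting for PCIE2CIO_CMD_START to clear; see
 * pcie2cio_read()/pcie2cio_write() and pci2cio_wait_completion() below.
 */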
#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4
/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @is_supported: Checks if we can support ICM on this controller
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 */
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	bool (*is_supported)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*save_devices)(struct tb *tb);
	int (*driver_ready)(struct tb *tb,
			    enum tb_security_level *security_level,
			    size_t *nboot_acl, bool *rpm);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
	void (*xdomain_connected)(struct tb *tb,
				  const struct icm_pkg_header *hdr);
	void (*xdomain_disconnected)(struct tb *tb,
				     const struct icm_pkg_header *hdr);
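/*
 * Editor's note: the function pointers in struct icm abstract the
 * per-generation differences in the ICM message format. icm_probe()
 * at the end of this file fills them in based on the NHI PCI device
 * ID: icm_fr_* for Falcon Ridge, icm_ar_* for Alpine Ridge and
 * icm_tr_* for Titan Ridge.
 */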
struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;

struct ep_name_entry {

#define EP_NAME_INTEL_VSS	0x10

/* Intel Vendor specific structure */

#define INTEL_VSS_FLAGS_RTD3	BIT(0)

static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
	const void *end = ep_name + size;

	while (ep_name < end) {
		const struct ep_name_entry *ep = ep_name;

		if (ep_name + ep->len > end)
		if (ep->type == EP_NAME_INTEL_VSS)
			return (const struct intel_vss *)ep->data;

static inline struct tb *icm_to_tb(struct icm *icm)
	return ((void *)icm - sizeof(struct tb));

static inline u8 phy_port_from_route(u64 route, u8 depth)
	link = depth ? route >> ((depth - 1) * 8) : route;
	return tb_phy_port_from_link(link);

static inline u8 dual_link_from_link(u8 link)
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
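/*
 * Editor's note: Thunderbolt links are numbered starting from 1 and a
 * physical port exposes them in dual-link pairs (1,2), (3,4), ... The
 * XOR in dual_link_from_link() simply flips between the two links of a
 * pair; %0 means "no link" and maps back to %0.
 */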
static inline u64 get_route(u32 route_hi, u32 route_lo)
	return (u64)route_hi << 32 | route_lo;

static inline u64 get_parent_route(u64 route)
	int depth = tb_route_length(route);
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
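/*
 * Editor's note: a Thunderbolt route string packs one hop (port number)
 * per byte, the deepest hop in the highest occupied byte (TB_ROUTE_SHIFT
 * is 8). get_parent_route() therefore masks off the topmost hop; an
 * illustrative route 0x030201 at depth 3 has the parent route 0x0201.
 */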
static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
	if (res_hdr->code != req_hdr->code)

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);

	return hdr->packet_id == hdr->total_packets - 1;
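/*
 * Editor's note: ICM responses that span several frames carry a
 * packet_id / total_packets pair in their header. icm_copy() places
 * each frame at packet_id * response_size in the caller's buffer and
 * reports the request complete once the frame with the last packet_id
 * has been seen.
 */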
static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
	struct icm *icm = tb_priv(tb);
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();

	req->match = icm_match;
	req->copy = icm_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = TB_CFG_PKG_ICM_CMD;
	req->response = response;
	req->npackets = npackets;
	req->response_size = response_size;
	req->response_type = TB_CFG_PKG_ICM_RESP;

	mutex_lock(&icm->request_lock);
	res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
	mutex_unlock(&icm->request_lock);

	tb_cfg_request_put(req);

	if (res.err != -ETIMEDOUT)
		return res.err == 1 ? -EIO : res.err;

	usleep_range(20, 50);
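/*
 * Editor's note: in the full function the body above sits in a small
 * retry loop; a request is retried (after the short sleep) only when
 * tb_cfg_request_sync() timed out, while any other error, or a firmware
 * level error (res.err == 1, mapped to -EIO), is returned to the caller
 * immediately. icm->request_lock serializes requests so only one
 * message is in flight to the ICM at a time.
 */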
static bool icm_fr_is_supported(struct tb *tb)
	return !x86_apple_machine;

static inline int icm_fr_get_switch_index(u32 port)
	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;

static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);

	index = icm_fr_get_switch_index(sw->ports[link]);

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		if (!(sw->first_data & ICM_SWITCH_USED)) {
		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];

	*route = get_route(sw->route_hi, sw->route_lo);

static void icm_fr_save_devices(struct tb *tb)
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);

icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
	struct icm_fr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;
static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
					const u8 *challenge, u8 *response)
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR)

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	phy_port = tb_phy_port_from_link(xd->link);
		cmd = NHI_MAILBOX_DISCONNECT_PA;
		cmd = NHI_MAILBOX_DISCONNECT_PB;

	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
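/*
 * Editor's note: with the Falcon Ridge firmware the XDomain paths are
 * torn down through the NHI mailbox in two stages, selecting the
 * mailbox command based on which physical port (PA/PB) the link
 * belongs to: stage 1 is sent first, then after a short delay stage 2
 * completes the disconnect.
 */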
static void add_switch(struct tb_switch *parent_sw, u64 route,
		       const uuid_t *uuid, const u8 *ep_name,
		       size_t ep_name_size, u8 connection_id, u8 connection_key,
		       u8 link, u8 depth, enum tb_security_level security_level,
		       bool authorized, bool boot)
	const struct intel_vss *vss;
	struct tb_switch *sw;

	pm_runtime_get_sync(&parent_sw->dev);

	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);

	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
		tb_sw_warn(sw, "cannot allocate memory for switch\n");

	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->authorized = authorized;
	sw->security_level = security_level;

	vss = parse_intel_vss(ep_name, ep_name_size);
		sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	if (tb_switch_add(sw)) {
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;

	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);
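/*
 * Editor's note: the pm_runtime_get_sync()/pm_runtime_put_autosuspend()
 * pair in add_switch() keeps the parent switch runtime resumed for the
 * whole time the new child switch is allocated and registered, and then
 * lets it autosuspend again once the child has been added.
 */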
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
			  u64 route, u8 connection_id, u8 connection_key,
			  u8 link, u8 depth, bool boot)
	/* Disconnect from parent */
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	/* Re-connect via updated port */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

	/* Update with the new addressing information */
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;

	/* This switch still exists */
	sw->is_unplugged = false;

static void remove_switch(struct tb_switch *sw)
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);

static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
	struct tb_xdomain *xd;

	pm_runtime_get_sync(&sw->dev);

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);

	tb_port_at(route, sw)->xdomain = xd;

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
	xd->is_unplugged = false;

static void remove_xdomain(struct tb_xdomain *xd)
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	struct tb_xdomain *xd;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = tb_phy_port_from_link(sw->link);
		phy_port = tb_phy_port_from_link(link);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that are still present. However, that
		 * information might have changed for example by the
		 * fact that a switch on a dual-link connection might
		 * have been enumerated using the other link now. Make
		 * sure our bookkeeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			/*
			 * It was enumerated through another link so update
			 * route string accordingly.
			 */
			if (sw->link != link) {
				ret = icm->get_route(tb, link, depth, &route);
					tb_err(tb, "failed to update route string for switch at %u.%u\n",
				route = tb_route(sw);

			update_switch(parent_sw, sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth, boot);

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */

	/*
	 * If the switch was not found by UUID, look for a switch on the
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we find one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
		dual_link = dual_link_from_link(link);
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);

	/* Remove existing XDomain connection if found */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
		tb_err(tb, "failed to find parent switch for %u.%u\n",

	ret = icm->get_route(tb, link, depth, &route);
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		tb_switch_put(parent_sw);

	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
		   sizeof(pkg->ep_name), pkg->connection_id,
		   pkg->connection_key, link, depth, security_level,

	tb_switch_put(parent_sw);
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);

	sw = tb_switch_find_by_link_depth(tb, link, depth);
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
	const struct icm_fr_event_xdomain_connected *pkg =
		(const struct icm_fr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
		u8 xd_phy_port, phy_port;

		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);

		 * If we find an existing XDomain connection remove it
		 * now. We need to go through login handshake and
		 * everything anyway to be able to re-establish the

	/*
	 * Check if there already exists an XDomain in the same place
	 * as the new one and in that case remove it because it is
	 * most likely another host that got disconnected.
	 */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
		dual_link = dual_link_from_link(link);
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,

	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	sw = tb_switch_find_by_route(tb, route);

	sw = tb_switch_find_by_link_depth(tb, link, depth);
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
	const struct icm_fr_event_xdomain_disconnected *pkg =
		(const struct icm_fr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	/*
	 * If the connection is through one or multiple devices, the
	 * XDomain device is removed along with them so it is fine if we
	 * cannot find it here.
	 */
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
	*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
		     ICM_TR_INFO_BOOT_ACL_SHIFT;
	*rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
	struct icm_tr_pkg_approve_device request;
	struct icm_tr_pkg_approve_device reply;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
	struct icm_tr_pkg_add_device_key_response reply;
	struct icm_tr_pkg_add_device_key request;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
	struct icm_tr_pkg_challenge_device_response reply;
	struct icm_tr_pkg_challenge_device request;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	struct icm_tr_pkg_approve_xdomain_response reply;
	struct icm_tr_pkg_approve_xdomain request;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR)

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
	struct icm_tr_pkg_disconnect_xdomain_response reply;
	struct icm_tr_pkg_disconnect_xdomain request;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
	request.stage = stage;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR)

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	ret = icm_tr_xdomain_tear_down(tb, xd, 1);

	usleep_range(10, 50);
	return icm_tr_xdomain_tear_down(tb, xd, 2);
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
	const struct icm_tr_event_device_connected *pkg =
		(const struct icm_tr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct tb_xdomain *xd;
	bool authorized, boot;

	 * Currently we don't use the QoS information coming with the
	 * device connected message so simply ignore that extra
	if (pkg->hdr.packet_id)

	route = get_route(pkg->route_hi, pkg->route_lo);
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
		/* Update the switch if it is still in the same place */
		if (tb_route(sw) == route && !!sw->authorized == authorized) {
			parent_sw = tb_to_switch(sw->dev.parent);
			update_switch(parent_sw, sw, route, pkg->connection_id,

	/* Another switch with the same address */
	sw = tb_switch_find_by_route(tb, route);

	/* XDomain connection with the same address */
	xd = tb_xdomain_find_by_route(tb, route);

	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
		tb_err(tb, "failed to find parent switch for %llx\n", route);

	add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
		   sizeof(pkg->ep_name), pkg->connection_id,
		   0, 0, 0, security_level, authorized, boot);

	tb_switch_put(parent_sw);
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
	const struct icm_tr_event_device_disconnected *pkg =
		(const struct icm_tr_event_device_disconnected *)hdr;
	struct tb_switch *sw;

	route = get_route(pkg->route_hi, pkg->route_lo);

	sw = tb_switch_find_by_route(tb, route);
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);

icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
	const struct icm_tr_event_xdomain_connected *pkg =
		(const struct icm_tr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;

	if (!tb->root_switch)

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
		if (xd->route == route) {
			update_xdomain(xd, route, 0);

	/* An existing xdomain with the same address */
	xd = tb_xdomain_find_by_route(tb, route);

	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	sw = tb_switch_find_by_route(tb, route);

	sw = tb_switch_find_by_route(tb, get_parent_route(route));
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);

icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
	const struct icm_tr_event_xdomain_disconnected *pkg =
		(const struct icm_tr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	route = get_route(pkg->route_hi, pkg->route_lo);

	xd = tb_xdomain_find_by_route(tb, route);
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
		if (!pci_is_pcie(parent))
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
		parent = pci_upstream_bridge(parent);

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
static bool icm_ar_is_supported(struct tb *tb)
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 */
	if (!x86_apple_machine)

	/*
	 * Find the upstream PCIe port in case we need to do a reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
			icm->upstream_port = upstream_port;
static int icm_ar_get_mode(struct tb *tb)
	struct tb_nhi *nhi = tb->nhi;

		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
	} while (--retries);

		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");

	return nhi_mailbox_mode(nhi);

icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
	struct icm_ar_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
			     ICM_AR_INFO_BOOT_ACL_SHIFT;
	*rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR)

	*route = get_route(reply.route_hi, reply.route_lo);
static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = { .code = ICM_PREBOOT_ACL },

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR)

	for (i = 0; i < nuuids; i++) {
		u32 *uuid = (u32 *)&uuids[i];

		uuid[0] = reply.acl[i].uuid_lo;
		uuid[1] = reply.acl[i].uuid_hi;

		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
			/* Map empty entries to null UUID */
		} else if (uuid[0] != 0 || uuid[1] != 0) {
			/* Upper two DWs are always ones */
			uuid[2] = 0xffffffff;
			uuid[3] = 0xffffffff;
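/*
 * Editor's note: the preboot ACL reply only carries the low 64 bits of
 * each UUID (uuid_lo/uuid_hi); an entry of all ones means "empty" and is
 * mapped to the null UUID, while for a real entry the upper two DWs are
 * implied to be all ones and are filled in by the driver as done above.
 */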
static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
			.code = ICM_PREBOOT_ACL,
			.flags = ICM_FLAGS_WRITE,

	for (i = 0; i < nuuids; i++) {
		const u32 *uuid = (const u32 *)&uuids[i];

		if (uuid_is_null(&uuids[i])) {
			 * Map null UUID to the empty (all ones) entries
			request.acl[i].uuid_lo = 0xffffffff;
			request.acl[i].uuid_hi = 0xffffffff;
			/* Two high DWs need to be set to all ones */
			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)

			request.acl[i].uuid_lo = uuid[0];
			request.acl[i].uuid_hi = uuid[1];

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
static void icm_handle_notification(struct work_struct *work)
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	/*
	 * When the domain is stopped we flush its workqueue but before
	 * that the root switch is removed. In that case we should treat
	 * the queued events as being canceled.
	 */
	if (tb->root_switch) {
		switch (n->pkg->code) {
		case ICM_EVENT_DEVICE_CONNECTED:
			icm->device_connected(tb, n->pkg);
		case ICM_EVENT_DEVICE_DISCONNECTED:
			icm->device_disconnected(tb, n->pkg);
		case ICM_EVENT_XDOMAIN_CONNECTED:
			icm->xdomain_connected(tb, n->pkg);
		case ICM_EVENT_XDOMAIN_DISCONNECTED:
			icm->xdomain_disconnected(tb, n->pkg);

	mutex_unlock(&tb->lock);

static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);

	INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);

	queue_work(tb->wq, &n->work);
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		   size_t *nboot_acl, bool *rpm)
	struct icm *icm = tb_priv(tb);
	unsigned int retries = 50;

	ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
		tb_err(tb, "failed to send driver ready to ICM\n");

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read root switch config successfully.
	 */
		struct tb_cfg_result res;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
	} while (--retries);

	tb_err(tb, "failed to read root switch config space, giving up\n");

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);

		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
	} while (time_before(jiffies, end));
static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;

	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
	struct icm *icm = tb_priv(tb);

	if (!icm->upstream_port)

	/* Make ARC wait for the CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
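/*
 * Editor's note: the sequence above first asks the firmware (ARC) to
 * wait for a CIO reset request, then sets the ICM_EN_INVERT/ICM_EN_CPU
 * bits in REG_FW_STS, and finally triggers the reset itself through a
 * PCIe2CIO mailbox write into the root switch config space. The exact
 * register offset and bit in that last write are taken from the code
 * as-is and not independently verified here.
 */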
static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
	unsigned int retries = 10;

	/* Check if the ICM firmware is already running */
	val = ioread32(nhi->iobase + REG_FW_STS);
	if (val & REG_FW_STS_ICM_EN)

	dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);

	/* Wait until the ICM firmware tells us it is up and running */
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
	} while (--retries);

static int icm_reset_phy_port(struct tb *tb, int phy_port)
	struct icm *icm = tb_priv(tb);

	if (!icm->upstream_port)

	 * Read link status of both null ports belonging to a single
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
static int icm_firmware_init(struct tb *tb)
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;

	ret = icm_firmware_start(tb, nhi);
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");

	if (icm->get_mode) {
		ret = icm->get_mode(tb);
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);

			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);

	 * Reset both physical ports if there is anything connected to
	ret = icm_reset_phy_port(tb, 0);
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
static int icm_driver_ready(struct tb *tb)
	struct icm *icm = tb_priv(tb);

	ret = icm_firmware_init(tb);

	if (icm->safe_mode) {
		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");

	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,

	/*
	 * Make sure the number of supported preboot ACL entries matches
	 * what we expect or disable the whole feature.
	 */
	if (tb->nboot_acl > icm->max_boot_acl)
static int icm_suspend(struct tb *tb)
	struct icm *icm = tb_priv(tb);

	if (icm->save_devices)
		icm->save_devices(tb);

	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
/*
 * Mark all switches (except root switch) below this one unplugged. ICM
 * firmware will send us an updated list of switches after we have sent
 * it the driver ready command. If a switch is not in that list it will
 * be removed when we perform the rescan.
 */
static void icm_unplug_children(struct tb_switch *sw)
	sw->is_unplugged = true;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

			port->xdomain->is_unplugged = true;
		else if (tb_port_has_remote(port))
			icm_unplug_children(port->remote->sw);
static void icm_free_unplugged_children(struct tb_switch *sw)
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];

		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		} else if (tb_port_has_remote(port)) {
			if (port->remote->sw->is_unplugged) {
				tb_switch_remove(port->remote->sw);
				port->remote = NULL;
				icm_free_unplugged_children(port->remote->sw);

static void icm_rescan_work(struct work_struct *work)
	struct icm *icm = container_of(work, struct icm, rescan_work.work);
	struct tb *tb = icm_to_tb(icm);

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		icm_free_unplugged_children(tb->root_switch);
	mutex_unlock(&tb->lock);
static void icm_complete(struct tb *tb)
	struct icm *icm = tb_priv(tb);

	if (tb->nhi->going_away)

	icm_unplug_children(tb->root_switch);

	/*
	 * Now all existing children should be resumed; start events
	 * from ICM to get updated status.
	 */
	__icm_driver_ready(tb, NULL, NULL, NULL);

	 * We do not get notifications of devices that have been
	 * unplugged during suspend so schedule a rescan to clean them up
	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
static int icm_runtime_suspend(struct tb *tb)
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);

static int icm_runtime_resume(struct tb *tb)
	 * We can reuse the same resume functionality as with system
static int icm_start(struct tb *tb)
	struct icm *icm = tb_priv(tb);

		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * NVM upgrade has not been tested on Apple systems and they
	 * don't provide images publicly either. To be on the safe side
	 * prevent root switch NVM upgrade on Macs for now.
	 */
	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
	tb->root_switch->rpm = icm->rpm;

	ret = tb_switch_add(tb->root_switch);
		tb_switch_put(tb->root_switch);
		tb->root_switch = NULL;

static void icm_stop(struct tb *tb)
	struct icm *icm = tb_priv(tb);

	cancel_delayed_work(&icm->rescan_work);
	tb_switch_remove(tb->root_switch);
	tb->root_switch = NULL;
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);

static int icm_disconnect_pcie_paths(struct tb *tb)
	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
static const struct tb_cm_ops icm_fr_ops = {
	.driver_ready = icm_driver_ready,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.handle_event = icm_handle_event,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,

static const struct tb_cm_ops icm_ar_ops = {
	.driver_ready = icm_driver_ready,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.runtime_suspend = icm_runtime_suspend,
	.runtime_resume = icm_runtime_resume,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_fr_approve_switch,
	.add_switch_key = icm_fr_add_switch_key,
	.challenge_switch_key = icm_fr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,

static const struct tb_cm_ops icm_tr_ops = {
	.driver_ready = icm_driver_ready,
	.suspend = icm_suspend,
	.complete = icm_complete,
	.runtime_suspend = icm_runtime_suspend,
	.runtime_resume = icm_runtime_resume,
	.handle_event = icm_handle_event,
	.get_boot_acl = icm_ar_get_boot_acl,
	.set_boot_acl = icm_ar_set_boot_acl,
	.approve_switch = icm_tr_approve_switch,
	.add_switch_key = icm_tr_add_switch_key,
	.challenge_switch_key = icm_tr_challenge_switch_key,
	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
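/*
 * Editor's note: the three tb_cm_ops tables above differ mainly in the
 * security/tunneling callbacks: Falcon Ridge uses the icm_fr_* helpers
 * throughout, Alpine Ridge adds runtime PM and preboot ACL support
 * while reusing the icm_fr_* device callbacks, and Titan Ridge uses the
 * route string based icm_tr_* variants.
 */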
struct tb *icm_probe(struct tb_nhi *nhi)
	tb = tb_domain_alloc(nhi, sizeof(struct icm));

	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
	mutex_init(&icm->request_lock);

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		icm->is_supported = icm_fr_is_supported;
		icm->get_route = icm_fr_get_route;
		icm->save_devices = icm_fr_save_devices;
		icm->driver_ready = icm_fr_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_fr_ops;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->get_route = icm_ar_get_route;
		icm->save_devices = icm_fr_save_devices;
		icm->driver_ready = icm_ar_driver_ready;
		icm->device_connected = icm_fr_device_connected;
		icm->device_disconnected = icm_fr_device_disconnected;
		icm->xdomain_connected = icm_fr_xdomain_connected;
		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
		tb->cm_ops = &icm_ar_ops;

	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
		icm->is_supported = icm_ar_is_supported;
		icm->get_mode = icm_ar_get_mode;
		icm->driver_ready = icm_tr_driver_ready;
		icm->device_connected = icm_tr_device_connected;
		icm->device_disconnected = icm_tr_device_disconnected;
		icm->xdomain_connected = icm_tr_xdomain_connected;
		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
		tb->cm_ops = &icm_tr_ops;

	if (!icm->is_supported || !icm->is_supported(tb)) {
		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");