// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "tb.h"
/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

	WRITE_AND_AUTHENTICATE = 1,
/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

		st = kzalloc(sizeof(*st), GFP_KERNEL);

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);

	mutex_unlock(&nvm_auth_status_lock);
static void nvm_clear_auth_status(const struct tb_switch *sw)
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	mutex_unlock(&nvm_auth_status_lock);
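
/*
 * Illustrative sketch (not part of the original driver): how the
 * authentication status cache above is meant to be used. The status is
 * keyed by the router UUID so it survives the router being power
 * cycled after a failed NVM authentication. "auth_failed" is a
 * hypothetical placeholder condition here.
 *
 *	u32 status;
 *
 *	nvm_clear_auth_status(sw);		// start a fresh attempt
 *	if (auth_failed)
 *		nvm_set_auth_status(sw, status);	// remember the failure
 *
 *	nvm_get_auth_status(sw, &status);	// 0 = no failure recorded
 */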
static int nvm_validate_and_write(struct tb_switch *sw)
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
		}

		/* Skip headers in the image */
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);

	if (!ret)
		sw->nvm->flushed = true;
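
/*
 * Worked example for the header checks above (values invented for
 * illustration): if the first dword of the image reads 0x00004000, the
 * FARB pointer gives hdr_size = 0x4000, which passes the 4k alignment
 * check. The digital section then starts at buf + 0x4000, its 16-bit
 * size is read from the first two bytes there, and the device ID is
 * compared at buf + 0x4000 + NVM_DEVID.
 */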
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {

		ret = tb_domain_disconnect_all_paths(sw->tb);

		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well, so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);

	/* Power cycle is required */

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeout for a while). Once
	 * we get response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
	 */
		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;

			tb_sw_warn(sw, "failed to authenticate NVM\n");
			nvm_set_auth_status(sw, status);

		tb_sw_info(sw, "power cycling the switch now\n");
		dma_port_power_cycle(sw->dma_port);
static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
static inline bool nvm_readable(struct tb_switch *sw)
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported, assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;

static inline bool nvm_upgradeable(struct tb_switch *sw)
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);

static inline int nvm_read(struct tb_switch *sw, unsigned int address,
			   void *buf, size_t size)
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);

static int nvm_authenticate(struct tb_switch *sw)

	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_authenticate(sw);

	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);
static int tb_switch_nvm_add(struct tb_switch *sw)

	if (!nvm_readable(sw))
		return 0;

	/*
	 * The NVM format of non-Intel hardware is not known so we
	 * currently restrict NVM upgrade to Intel hardware. We may
	 * relax this in the future when we learn other NVM formats.
	 */
	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
	    sw->config.vendor_id != 0x8087) {
		dev_info(&sw->dev,
			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			 sw->config.vendor_id);
		return 0;
	}

	nvm = tb_nvm_alloc(&sw->dev);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));

		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;
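		/*
		 * Worked example (illustrative values, not from the
		 * original source): if the size field read from
		 * NVM_FLASH_SIZE has (val & 7) == 4, the flash holds
		 * (SZ_1M << 4) / 8 = 2 MB. With a generation >= 3
		 * router the 16k header is subtracted and the rest is
		 * split in two, giving roughly 1 MB each for the
		 * active and non-active NVM regions.
		 */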
		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
					    tb_switch_nvm_write);
	}

static void tb_switch_nvm_remove(struct tb_switch *sw)

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);
/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
	switch (port->type >> 16) {
		switch ((u8) port->type) {

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", port->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
	struct tb_cap_phy phy;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}

	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

		state = tb_port_state(port);

		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);

	tb_port_dbg(port,
		    "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
/**
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)

	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);

	data &= ~ADP_CS_5_LCA_MASK;
	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
	if (tb_switch_is_icm(port->sw))
		return 1;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;

static int __tb_port_enable(struct tb_port *port, bool enable)

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used to enable lane 0 and 1 adapters.
 */
int tb_port_enable(struct tb_port *port)
	return __tb_port_enable(port, true);

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used to disable lane 0 and 1 adapters.
 */
int tb_port_disable(struct tb_port *port)
	return __tb_port_enable(port, false);
/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);

	if (res == -ENODEV) {
		tb_dbg(port->sw->tb, " Port %d: not implemented\n",
		       port->port);
		port->disabled = true;
		return 0;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non-switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	/* Control port does not need HopID allocation */
	if (port->port) {
		ida_init(&port->in_hopids);
		ida_init(&port->out_hopids);
	}

	INIT_LIST_HEAD(&port->list);
	return 0;
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
	ida_simple_remove(&port->in_hopids, hopid);

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
	ida_simple_remove(&port->out_hopids, hopid);
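
/*
 * Illustrative sketch (not part of the original driver): allocations
 * and releases are expected to pair up. A path setup might reserve an
 * input HopID on one port and hand it back on teardown:
 *
 *	int hopid;
 *
 *	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;	// negative errno from the IDA
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 *
 * Passing -1 as @max_hopid falls back to the maximum the adapter
 * supports, as implemented in tb_port_alloc_hopid() above.
 */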
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
	u64 mask = (1ULL << parent->config.depth * 8) - 1;
	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
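
/*
 * Worked example (illustrative): a route string encodes one hop per
 * byte. If @parent is at depth 1, the mask above is
 * (1ULL << 8) - 1 = 0xff, so @sw is considered reachable when the
 * lowest byte of both route strings, i.e. the first hop down from the
 * root, is the same.
 */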
/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If the @prev is dual
 * link port, the function follows that link and returns another end on
 * that link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
	struct tb_port *next;

	if (prev->sw == end->sw) {

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	}

		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}

	return next != prev ? next : NULL;
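
/*
 * Illustrative sketch (not in the original driver): walking every port
 * on a path from @src to @dst, both assumed to be valid adapters in
 * the same domain:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)) != NULL) {
 *		// visit p: both ends of every hop are returned,
 *		// including src and dst themselves
 *	}
 *
 * The driver wraps this pattern in the tb_for_each_port_on_path()
 * helper macro in tb.h.
 */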
/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 * or negative errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);

	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;

static bool tb_port_is_width_supported(struct tb_port *port, int width)

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);

	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;

	return !!(widths & width);

static int tb_port_set_link_width(struct tb_port *port, unsigned int width)

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case 1:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	case 2:
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	val |= LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)

	/*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	 */
	ret = tb_port_get_link_width(port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port, 2);
		if (ret)
			return ret;
	}

	ret = tb_port_get_link_width(port->dual_link_port);
	if (ret == 1) {
		ret = tb_port_set_link_width(port->dual_link_port, 2);
		if (ret) {
			tb_port_set_link_width(port, 1);
			return ret;
		}
	}

	port->bonded = true;
	port->dual_link_port->bonded = true;
/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
	port->dual_link_port->bonded = false;
	port->bonded = false;

	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);

static int tb_port_start_lane_initialization(struct tb_port *port)

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);
/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HDP bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);

	return !!(data & ADP_DP_CS_2_HDP);

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HDP set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);

	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
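
/*
 * Illustrative sketch (not in the original driver): a DP tunnel setup
 * would first program the Hop IDs and only then enable the paths, per
 * the kernel-doc above. The Hop ID values here are made up:
 *
 *	ret = tb_dp_port_set_hops(port, 9, 10, 11);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 *
 * Teardown reverses this with tb_dp_port_enable(port, false).
 */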
/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);

	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);

		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:

	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->authorized);

static int disapprove_switch(struct device *dev, void *not_used)
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);

		ret = tb_domain_disapprove_switch(sw->tb, sw);

		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		ret = disapprove_switch(&sw->dev, NULL);
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/* Notify status change to the userspace */
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
	struct tb_switch *sw = tb_to_switch(dev);

	ret = kstrtouint(buf, 0, &val);

	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret ? ret : count;

static DEVICE_ATTR_RW(authorized);
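
/*
 * Illustrative note (not in the original source): with the "user"
 * security level the attribute above is what userspace (or a boot
 * script) writes to authorize a device, e.g.
 *
 *	echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
 *
 * where "0-1" is an example device name as set by dev_set_name() in
 * tb_switch_alloc() below. Writing 2 challenges the device with the
 * stored key instead.
 */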
static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->boot);

static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->device);

static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");

static DEVICE_ATTR_RO(device_name);

static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->generation);

static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sprintf(buf, "\n");

	mutex_unlock(&sw->tb->lock);
static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	bool clear = false;

	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {

		sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);

	}

	mutex_unlock(&sw->tb->lock);

static DEVICE_ATTR(key, 0600, key_show, key_store);
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);

/*
 * Currently all lanes must run at the same speed but we expose both
 * directions here to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%u\n", sw->link_width);

/*
 * Currently the link has the same number of lanes in both directions (1 or
 * 2) but expose them separately to allow possible asymmetric links in the
 * future.
 */
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
				      bool disconnect)
	struct tb_switch *sw = tb_to_switch(dev);

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* If NVMem devices are not yet added */

	ret = kstrtoint(buf, 10, &val);

	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);

		if (!sw->nvm->flushed) {
			if (!sw->nvm->buf) {
				ret = -EINVAL;
				goto exit_unlock;
			}

			ret = nvm_validate_and_write(sw);
			if (ret || val == WRITE_ONLY)
				goto exit_unlock;
		}

		if (val == WRITE_AND_AUTHENTICATE) {
			if (disconnect) {
				ret = tb_lc_force_power(sw);
			} else {
				sw->nvm->authenticating = true;
				ret = nvm_authenticate(sw);
			}
		}

exit_unlock:
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
	int ret = nvm_authenticate_sysfs(dev, buf, false);
	if (ret)
		return ret;
	return count;

static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
	return nvm_authenticate_show(dev, attr, buf);

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)

	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;

static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
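
/*
 * Illustrative note (not in the original source): the documented NVM
 * upgrade flow from userspace writes the image to the non-active NVMem
 * device created in tb_switch_nvm_add() and then triggers the
 * authentication through the attribute above, e.g.
 *
 *	dd if=firmware.bin of=/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem
 *	echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
 *
 * The device and NVMem names here are examples; reading
 * nvm_authenticate afterwards reports the cached failure status, with
 * 0 meaning success.
 */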
static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);

static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%#x\n", sw->vendor);

static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");

static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
	struct tb_switch *sw = tb_to_switch(dev);

	return sprintf(buf, "%pUb\n", sw->uuid);

static DEVICE_ATTR_RO(unique_id);
static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_key.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
	NULL,
};
static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);

	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
			return 0;
	} else if (attr == &dev_attr_device.attr) {
		if (!sw->device)
			return 0;
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
			return 0;
	} else if (attr == &dev_attr_vendor.attr) {
		if (!sw->vendor)
			return 0;
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
			return 0;
	} else if (attr == &dev_attr_key.attr) {
		if (tb_route(sw) &&
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_boot.attr) {
		if (tb_route(sw))
			return attr->mode;
		return 0;
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
			return attr->mode;
		return 0;
	}

	return sw->safe_mode ? 0 : attr->mode;
static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
};

static const struct attribute_group *switch_groups[] = {
	&switch_group,
	NULL,
};
static void tb_switch_release(struct device *dev)
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;

	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		if (!port->disabled) {
			ida_destroy(&port->in_hopids);
			ida_destroy(&port->out_hopids);
		}
	}

	kfree(sw->device_name);
	kfree(sw->vendor_name);
/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);

	return 0;

static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);

	return 0;

static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
			   NULL)
};

struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.pm = &tb_switch_pm_ops,
};
static int tb_switch_get_generation(struct tb_switch *sw)
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
		return 1;

	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
		return 2;

	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
		return 3;

	default:
		if (tb_switch_is_usb4(sw))
			return 4;

		/*
		 * For unknown switches assume generation to be 1 to be
		 * on the safe side.
		 */
		tb_sw_warn(sw, "unsupported switch device id %#x\n",
			   sw->config.device_id);
		return 1;
	}

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)

	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
	else
		max_depth = TB_SWITCH_MAX_DEPTH;

	return depth > max_depth;
/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
				  u64 route)
	struct tb_switch *sw;

	/* Unlock the downstream port so we can access the switch below */
	if (route) {
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	}

	depth = tb_route_length(route);

	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
	if (ret)
		goto err_free_sw_ports;

	sw->generation = tb_switch_get_generation(sw);

	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);

	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;

	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	}

	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
			    GFP_KERNEL);
	if (!sw->ports) {
		ret = -ENOMEM;
		goto err_free_sw_ports;
	}

	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
	}

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);

	/* Root switch is always authorized */
	if (!route)
		sw->authorized = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;

err_free_sw_ports:
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except DMA configuration port before it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
 */
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
	struct tb_switch *sw;

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;

	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));

	return sw;
/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use. Can be called for the switch again after
 * resume from low power states to re-initialize it.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw)
	struct tb *tb = sw->tb;

	route = tb_route(sw);

	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);

	sw->config.enabled = 1;

	if (tb_switch_is_usb4(sw)) {
		/*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities.
		 */
		sw->config.cmuv = USB4_VERSION_1_0;

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,

		ret = usb4_switch_setup(sw);
	} else {
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);

		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
			return -ENODEV;
		}

		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
	}

	return tb_plug_events_active(sw, true);
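
/*
 * Illustrative sketch (not in the original source): a connection
 * manager brings a new router up roughly in this order, using the
 * functions in this file:
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = tb_switch_configure(sw);	// upload config, see above
 *	if (!ret)
 *		ret = tb_switch_add(sw);	// DROM, NVM, sysfs; see below
 *	if (ret)
 *		tb_switch_put(sw);
 */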
static int tb_switch_set_uuid(struct tb_switch *sw)

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);

		/*
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		 */
		ret = tb_lc_read_uuid(sw, uuid);

		/*
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		 */
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;

	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
static int tb_switch_add_dma_port(struct tb_switch *sw)

	switch (sw->generation) {
	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return 0;

		fallthrough;
	case 3:
		ret = tb_switch_set_uuid(sw);

	default:
		/*
		 * DMA port is the only thing available when the switch
		 * is in safe mode.
		 */
		if (!sw->safe_mode)
			return 0;
	}

	if (sw->no_nvm_upgrade)
		return 0;

	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_nvm_authenticate_status(sw, &status);

		if (status) {
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
		}

		return 0;
	}

	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
		return 0;

	sw->dma_port = dma_port_alloc(sw);

	/*
	 * If there is status already set then authentication failed
	 * when the dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is to unblock runtime PM of the root port.
	 */
	nvm_get_auth_status(sw, &status);
	if (status) {
		if (!tb_route(sw))
			nvm_authenticate_complete_dma_port(sw);
		return status;
	}

	/*
	 * Check status of the previous flash authentication. If there
	 * is one we need to power cycle the switch in any case to make
	 * it functional again.
	 */
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);

	/* Now we can allow root port to suspend again */
	if (!tb_route(sw))
		nvm_authenticate_complete_dma_port(sw);

	if (status) {
		tb_sw_info(sw, "switch flash authentication failed\n");
		nvm_set_auth_status(sw, status);
	}

	tb_sw_info(sw, "power cycling the switch now\n");
	dma_port_power_cycle(sw->dma_port);

	/*
	 * We return an error here, which causes the switch add to fail.
	 * The switch should appear back after the power cycle is complete.
	 */
	return -ESHUTDOWN;
static void tb_switch_default_link_ports(struct tb_switch *sw)

	for (i = 1; i <= sw->config.max_port_number; i += 2) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);

static int tb_switch_update_link_attributes(struct tb_switch *sw)
	bool change = false;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	up = tb_upstream_port(sw);

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	tb_switch_update_link_attributes(sw);
	tb_sw_dbg(sw, "lane bonding disabled\n");
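
/*
 * Illustrative sketch (not in the original source): after a device
 * router is enumerated, a connection manager would typically pair
 * these calls, with tb_switch_configure_link() defined right below:
 *
 *	if (tb_switch_lane_bonding_enable(sw))
 *		tb_sw_warn(sw, "failed to enable lane bonding\n");
 *	tb_switch_configure_link(sw);
 *
 * and undo it with tb_switch_lane_bonding_disable() plus
 * tb_switch_unconfigure_link() when the device goes away.
 */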
/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
	struct tb_port *up, *down;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from the DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM on NVM that includes
	 * vendor and model identification strings which we then expose
	 * to the userspace. NVM can be accessed through DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		/* read drom */
		ret = tb_drom_read(sw);
		if (ret) {
			dev_err(&sw->dev, "reading DROM failed\n");
			return ret;
		}
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		device_del(&sw->dev);
		return ret;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;
/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}
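
/*
 * Note that marking a router unplugged does not release anything by
 * itself; the hotplug path is expected to follow up with
 * tb_switch_remove(), which skips plug event deactivation for routers
 * that have is_unplugged set.
 */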
static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}
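
/**
 * tb_switch_resume() - Resume a switch after sleep
 * @sw: Switch to resume
 *
 * Resumes the router and then walks all its ports: device routers whose
 * UID changed (or that disappeared) while suspended are treated as
 * disconnected, links that did not survive are marked unplugged with
 * tb_sw_set_unplugged(), and surviving downstream routers are resumed
 * recursively. Returns %0 on success and negative errno otherwise.
 */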
int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				   "changed while suspended (uid %#llx -> %#llx)\n",
				   sw->uid, uid);
			return -ENODEV;
		}
	}
	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain) {
			/*
			 * For disconnected downstream lane adapters
			 * start lane initialization now so we detect
			 * future connects.
			 */
			if (!tb_is_upstream_port(port) && tb_port_is_null(port))
				tb_port_start_lane_initialization(port);
			continue;
		} else if (port->xdomain) {
			/*
			 * Start lane initialization for XDomain so the
			 * link gets re-established.
			 */
			tb_port_start_lane_initialization(port);
		}

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}
/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends router and all its children. Enables wakes according to the
 * value of @runtime and then sets the sleep bit for the router. If @sw
 * is the host router the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");
	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}
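
/*
 * Design note on the wake flags above: runtime suspend also arms
 * connect/disconnect wakes so that plugging or unplugging a device
 * resumes the domain, whereas system sleep arms only the protocol
 * wakes, and only when userspace has allowed the device to wake the
 * system (device_may_wakeup()).
 */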
/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}
/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_alloc_dp_resource(sw, in);
	return tb_lc_dp_sink_alloc(sw, in);
}
/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
}
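
/*
 * Illustrative use of the three DP resource helpers above
 * (hypothetical caller, error handling trimmed):
 *
 *	if (tb_switch_query_dp_resource(sw, in) &&
 *	    !tb_switch_alloc_dp_resource(sw, in)) {
 *		... set up the DP tunnel from @in ...
 *		tb_switch_dealloc_dp_resource(sw, in);
 *	}
 */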
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}
/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
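
/*
 * All of the lookups above return the switch with an elevated
 * reference count, so an illustrative caller pairs the find with
 * tb_switch_put():
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		... use sw ...
 *		tb_switch_put(sw);
 *	}
 */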
/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
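
/*
 * Illustrative use (hypothetical caller): pick the first USB3
 * downstream adapter of a router, if it has one:
 *
 *	port = tb_switch_find_port(sw, TB_TYPE_USB3_DOWN);
 *	if (!port)
 *		return -ENODEV;
 */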