// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT		100	/* ms */

/*
 * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
 * direction. This is 40G - 10% guard band bandwidth.
 */
#define TB_ASYM_MIN		(40000 * 90 / 100)

/*
 * Threshold bandwidth (in Mb/s) that is used to switch the links to
 * asymmetric and back. This is selected as 45G which means when the
 * request is higher than this, we switch the link to asymmetric, and
 * when it is less than this we switch it back. The 45G is selected so
 * that we still have 27G (of the total 72G) for bulk PCIe traffic when
 * switching back to symmetric.
 */
#define TB_ASYM_THRESHOLD	45000
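
/*
 * For reference: a symmetric Gen 4 link carries 2 x 40G = 80G in each
 * direction, or 72G after the 10% guard band. With the default 45G
 * threshold this leaves 72G - 45G = 27G for bulk PCIe traffic when the
 * link is symmetric, as noted above.
 */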

#define MAX_GROUPS		7	/* max Group_ID is 7 */

static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
module_param_named(asym_threshold, asym_threshold, uint, 0444);
MODULE_PARM_DESC(asym_threshold,
		"threshold (Mb/s) for switching Gen 4 link symmetry. 0 disables. (default: "
		__MODULE_STRING(TB_ASYM_THRESHOLD) ")");
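
/*
 * For example (assuming the driver is loaded as the "thunderbolt"
 * module), the threshold could be raised at load time with:
 *
 *   modprobe thunderbolt asym_threshold=50000
 *
 * and setting it to 0 disables the symmetric/asymmetric transitions.
 */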

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};
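
/*
 * The connection manager private data lives directly after struct tb
 * (that is what tb_priv() hands out), so the owning domain structure
 * can be recovered by stepping back sizeof(struct tb) bytes. This
 * relies on the two always being allocated together as one chunk.
 */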
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		group->tb = tcm_to_tb(tcm);
		group->index = i + 1;
		INIT_LIST_HEAD(&group->ports);
	}
}

static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
					   struct tb_port *in)
{
	if (!group || WARN_ON(in->group))
		return;

	in->group = group;
	list_add_tail(&in->group_list, &group->ports);

	tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}

static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (list_empty(&group->ports))
			return group;
	}

	return NULL;
}

static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
			  struct tb_port *out)
{
	struct tb_bandwidth_group *group;
	struct tb_tunnel *tunnel;

	/*
	 * Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always setup tunnels the same way we
	 * can just check for the routers at both ends of the tunnels
	 * and if they are the same we have a match.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;
		if (tunnel->src_port->sw == in->sw &&
		    tunnel->dst_port->sw == out->sw) {
			group = tunnel->src_port->group;
			if (group) {
				tb_bandwidth_group_attach_port(group, in);
				return group;
			}
		}
	}

	/* Pick up next available group then */
	group = tb_find_free_bandwidth_group(tcm);
	if (group)
		tb_bandwidth_group_attach_port(group, in);
	else
		tb_port_warn(in, "no available bandwidth groups\n");

	return group;
}

static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
					struct tb_port *out)
{
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		int index, i;

		index = usb4_dp_port_group_id(in);
		for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
			if (tcm->groups[i].index == index) {
				tb_bandwidth_group_attach_port(&tcm->groups[i], in);
				return;
			}
		}
	}

	tb_attach_bandwidth_group(tcm, in, out);
}

static void tb_detach_bandwidth_group(struct tb_port *in)
{
	struct tb_bandwidth_group *group = in->group;

	if (group) {
		in->group = NULL;
		list_del_init(&in->group_list);

		tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
	}
}

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available after discovery\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	unsigned int clx = TB_CL0S | TB_CL1;
	const struct tb_tunnel *tunnel;
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && tb_switch_depth(sw) > 1)
		sw = tb_switch_parent(sw);

	if (!sw)
		return 0;

	if (tb_switch_depth(sw) != 1)
		return 0;

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
				return 0;
		}
	}

	/*
	 * Initially try with CL2. If that's not supported by the
	 * topology try with CL0s and CL1 and then give up.
	 */
	ret = tb_switch_clx_enable(sw, clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, clx);
	return ret == -EOPNOTSUPP ? 0 : ret;
}

/**
 * tb_disable_clx() - Disable CL states up to host router
 * @sw: Router to start
 *
 * Disables CL states from @sw up to the host router. Returns true if
 * any CL states were disabled. This can be used to figure out whether
 * the link was setup by us or the boot firmware so we don't
 * accidentally enable them if they were not enabled during discovery.
 */
static bool tb_disable_clx(struct tb_switch *sw)
{
	bool disabled = false;

	do {
		int ret;

		ret = tb_switch_clx_disable(sw);
		if (ret > 0)
			disabled = true;
		else if (ret < 0)
			tb_sw_warn(sw, "failed to disable CL states\n");

		sw = tb_switch_parent(sw);
	} while (sw);

	return disabled;
}

static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (!sw)
		return 0;

	if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
		enum tb_switch_tmu_mode mode;
		int ret;

		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		else
			mode = TB_SWITCH_TMU_MODE_HIFI_BI;

		ret = tb_switch_tmu_configure(sw, mode);
		if (ret)
			return ret;

		return tb_switch_tmu_enable(sw);
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 *
	 * If both routers are v2 then we don't need to do anything as
	 * they are using enhanced TMU mode that allows all CLx.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
	 * the CL states. For v1 and before we need to use the normal
	 * rate to allow CL1 (when supported). Otherwise we keep the TMU
	 * running at the highest accuracy.
	 */
	ret = tb_switch_tmu_configure(sw,
			TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP) {
		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_LOWRES);
		else
			ret = tb_switch_tmu_configure(sw,
					TB_SWITCH_TMU_MODE_HIFI_BI);
	}
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}
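
/*
 * Summarizing the TMU mode selection above, in order of preference:
 *
 *   MEDRES_ENHANCED_UNI	both ends of the link are USB4 v2
 *   LOWRES			older routers with CL1 enabled
 *   HIFI_BI			otherwise (highest accuracy)
 */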

static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static void tb_discover_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			struct tb_port *in = tunnel->src_port;
			struct tb_port *out = tunnel->dst_port;

			/* Keep the domain from powering down */
			pm_runtime_get_sync(&in->sw->dev);
			pm_runtime_get_sync(&out->sw->dev);

			tb_discover_bandwidth_group(tcm, in, out);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (tb_port_path_direction_downstream(src_port, dst_port))
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
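
/*
 * Note that the USB3 bandwidth of a whole branch is managed at this
 * first hop tunnel; the release and reclaim helpers further below all
 * operate on the tunnel returned here.
 */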

/**
 * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the consumed bandwidth is calculated for
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed USB3 and PCIe bandwidth at @port on the path
 * from @src_port to @dst_port. Does not take the tunnel starting from
 * @src_port and ending at @dst_port into account.
 */
static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
					   struct tb_port *src_port,
					   struct tb_port *dst_port,
					   struct tb_port *port,
					   int *consumed_up,
					   int *consumed_down)
{
	int pci_consumed_up, pci_consumed_down;
	struct tb_tunnel *tunnel;

	*consumed_up = *consumed_down = 0;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && tunnel->src_port != src_port &&
	    tunnel->dst_port != dst_port) {
		int ret;

		ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
						   consumed_down);
		if (ret)
			return ret;
	}

	/*
	 * If there is anything reserved for PCIe bulk traffic take it
	 * into account here too.
	 */
	if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
		*consumed_up += pci_consumed_up;
		*consumed_down += pci_consumed_down;
	}

	return 0;
}

/**
 * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the consumed bandwidth is calculated for
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed DP bandwidth at @port on the path from @src_port
 * to @dst_port. Does not take the tunnel starting from @src_port and
 * ending at @dst_port into account.
 */
static int tb_consumed_dp_bandwidth(struct tb *tb,
				    struct tb_port *src_port,
				    struct tb_port *dst_port,
				    struct tb_port *port,
				    int *consumed_up,
				    int *consumed_down)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	int ret;

	*consumed_up = *consumed_down = 0;

	/*
	 * Find all DP tunnels that cross the port and reduce
	 * their consumed bandwidth from the available.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		int dp_consumed_up, dp_consumed_down;

		if (tb_tunnel_is_invalid(tunnel))
			continue;

		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (!tb_tunnel_port_on_path(tunnel, port))
			continue;

		/*
		 * Ignore the DP tunnel between src_port and dst_port
		 * because it is the same tunnel and we may be
		 * re-calculating estimated bandwidth.
		 */
		if (tunnel->src_port == src_port &&
		    tunnel->dst_port == dst_port)
			continue;

		ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
						   &dp_consumed_down);
		if (ret)
			return ret;

		*consumed_up += dp_consumed_up;
		*consumed_down += dp_consumed_down;
	}

	return 0;
}

static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
			      struct tb_port *port)
{
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	enum tb_link_width width;

	if (tb_is_upstream_port(port))
		width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
	else
		width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;

	return tb_port_width_supported(port, width);
}
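
/*
 * For reference, the width checked above from each lane adapter's own
 * point of view:
 *
 *			path downstream		path upstream
 *   upstream port	TB_LINK_WIDTH_ASYM_RX	TB_LINK_WIDTH_ASYM_TX
 *   downstream port	TB_LINK_WIDTH_ASYM_TX	TB_LINK_WIDTH_ASYM_RX
 */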

/**
 * tb_maximum_bandwidth() - Maximum bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port the total bandwidth is calculated for
 * @max_up: Maximum upstream bandwidth (Mb/s)
 * @max_down: Maximum downstream bandwidth (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Returns maximum possible bandwidth in @max_up and @max_down over a
 * single link at @port. If @include_asym is set then includes the
 * additional bandwidth that becomes available if the links are
 * transitioned into asymmetric in the direction from @src_port to
 * @dst_port.
 */
static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
				struct tb_port *dst_port, struct tb_port *port,
				int *max_up, int *max_down, bool include_asym)
{
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	int link_speed, link_width, up_bw, down_bw;

	/*
	 * Can include asymmetric, only if it is actually supported by
	 * the link.
	 */
	if (!tb_asym_supported(src_port, dst_port, port))
		include_asym = false;

	if (tb_is_upstream_port(port)) {
		link_speed = port->sw->link_speed;
		/*
		 * sw->link_width is from upstream perspective so we use
		 * the opposite for downstream of the host router.
		 */
		if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
			if (downstream) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			}
		} else {
			up_bw = link_speed * port->sw->link_width * 1000;
			down_bw = up_bw;
		}
	} else {
		link_speed = tb_port_get_link_speed(port);
		if (link_speed < 0)
			return link_speed;

		link_width = tb_port_get_link_width(port);
		if (link_width < 0)
			return link_width;

		if (link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
			if (downstream) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			}
		} else {
			up_bw = link_speed * link_width * 1000;
			down_bw = up_bw;
		}
	}

	/* Leave 10% guard band */
	*max_up = up_bw - up_bw / 10;
	*max_down = down_bw - down_bw / 10;

	tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
	return 0;
}
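
/*
 * For reference, with a Gen 4 link (40 Gb/s per lane) the above works
 * out as 2 * 40000 = 80000 Mb/s per direction when symmetric, or 72000
 * Mb/s after the 10% guard band, and 120000/40000 Mb/s when
 * asymmetric, or 108000/36000 Mb/s after the guard band (the latter
 * matching TB_ASYM_MIN).
 */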

/**
 * tb_available_bandwidth() - Available bandwidth for tunneling
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @available_up: Available bandwidth upstream (Mb/s)
 * @available_down: Available bandwidth downstream (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Calculates maximum available bandwidth for protocol tunneling between
 * @src_port and @dst_port at the moment. This is the minimum of the
 * maximum link bandwidth across all links, each reduced by the
 * bandwidth currently consumed on that link.
 *
 * If @include_asym is true then this also includes the bandwidth that
 * can be added when the links are transitioned into asymmetric (but
 * does not actually transition the links).
 */
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down, bool include_asym)
{
	struct tb_port *port;
	int ret;

	/* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
	*available_up = *available_down = 120000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int max_up, max_down, consumed_up, consumed_down;

		if (!tb_port_is_null(port))
			continue;

		ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
					   &max_up, &max_down, include_asym);
		if (ret)
			return ret;

		ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
						      port, &consumed_up,
						      &consumed_down);
		if (ret)
			return ret;
		max_up -= consumed_up;
		max_down -= consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
					       &consumed_up, &consumed_down);
		if (ret)
			return ret;
		max_up -= consumed_up;
		max_down -= consumed_down;

		if (max_up < *available_up)
			*available_up = max_up;
		if (max_down < *available_down)
			*available_down = max_down;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down, false);
	if (ret) {
		tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
		return;
	}

	tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
		      available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down, false);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_configure_asym() - Transition links to asymmetric if needed
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @requested_up: Additional bandwidth (Mb/s) required upstream
 * @requested_down: Additional bandwidth (Mb/s) required downstream
 *
 * Transition links between @src_port and @dst_port into asymmetric, with
 * three lanes in the direction from @src_port towards @dst_port and one lane
 * in the opposite direction, if the bandwidth requirements
 * (requested + currently consumed) on that link exceed @asym_threshold.
 *
 * Must be called with available >= requested over all links.
 */
static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
			     struct tb_port *dst_port, int requested_up,
			     int requested_down)
{
	struct tb_switch *sw;
	bool clx, downstream;
	struct tb_port *up;
	int ret = 0;

	if (!asym_threshold)
		return 0;

	/* Disable CL states before doing any transitions */
	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */
	if (downstream)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	clx = tb_disable_clx(sw);

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		int consumed_up, consumed_down;
		enum tb_link_width width;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);
		if (ret)
			break;

		if (downstream) {
			/*
			 * Downstream so make sure upstream is within the 36G
			 * (40G - guard band 10%), and the requested is above
			 * what the threshold is.
			 */
			if (consumed_up + requested_up >= TB_ASYM_MIN) {
				ret = -ENOBUFS;
				break;
			}
			/* Does consumed + requested exceed the threshold */
			if (consumed_down + requested_down < asym_threshold)
				continue;

			width = TB_LINK_WIDTH_ASYM_RX;
		} else {
			/* Upstream, the opposite of above */
			if (consumed_down + requested_down >= TB_ASYM_MIN) {
				ret = -ENOBUFS;
				break;
			}
			if (consumed_up + requested_up < asym_threshold)
				continue;

			width = TB_LINK_WIDTH_ASYM_TX;
		}

		if (up->sw->link_width == width)
			continue;

		if (!tb_port_width_supported(up, width))
			continue;

		tb_sw_dbg(up->sw, "configuring asymmetric link\n");

		/*
		 * Here requested + consumed > threshold so we need to
		 * transition the link into asymmetric now.
		 */
		ret = tb_switch_set_link_width(up->sw, width);
		if (ret) {
			tb_sw_warn(up->sw, "failed to set link width\n");
			break;
		}
	}

	/* Re-enable CL states if they were previously enabled */
	if (clx)
		tb_enable_clx(sw);

	return ret;
}
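
/*
 * With the defaults above this means, for example, that a downstream
 * path switches to asymmetric once consumed + requested downstream
 * bandwidth reaches 45000 Mb/s, provided the upstream direction still
 * fits within TB_ASYM_MIN (36000 Mb/s).
 */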

/**
 * tb_configure_sym() - Transition links to symmetric if possible
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @requested_up: New lower bandwidth request upstream (Mb/s)
 * @requested_down: New lower bandwidth request downstream (Mb/s)
 *
 * Goes over each link from @src_port to @dst_port and tries to
 * transition the link to symmetric if the currently consumed bandwidth
 * allows.
 */
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
			    struct tb_port *dst_port, int requested_up,
			    int requested_down)
{
	struct tb_switch *sw;
	bool clx, downstream;
	struct tb_port *up;
	int ret = 0;

	if (!asym_threshold)
		return 0;

	/* Disable CL states before doing any transitions */
	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */
	if (downstream)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	clx = tb_disable_clx(sw);

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		int consumed_up, consumed_down;

		/* Already symmetric */
		if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
			continue;
		/* Unplugged, no need to switch */
		if (up->sw->is_unplugged)
			continue;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);
		if (ret)
			break;

		if (downstream) {
			/*
			 * Downstream so we want the consumed_down < threshold.
			 * Upstream traffic should be less than 36G (40G
			 * guard band 10%) as the link was configured asymmetric
			 * already.
			 */
			if (consumed_down + requested_down >= asym_threshold)
				continue;
		} else {
			if (consumed_up + requested_up >= asym_threshold)
				continue;
		}

		if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
			continue;

		tb_sw_dbg(up->sw, "configuring symmetric link\n");

		ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
		if (ret) {
			tb_sw_warn(up->sw, "failed to set link width\n");
			break;
		}
	}

	/* Re-enable CL states if they were previously enabled */
	if (clx)
		tb_enable_clx(sw);

	return ret;
}

static void tb_configure_link(struct tb_port *down, struct tb_port *up,
			      struct tb_switch *sw)
{
	struct tb *tb = sw->tb;

	/* Link the routers using both links if available */
	down->remote = up;
	up->remote = down;
	if (down->dual_link_port && up->dual_link_port) {
		down->dual_link_port->remote = up->dual_link_port;
		up->dual_link_port->remote = down->dual_link_port;
	}

	/*
	 * Enable lane bonding if the link is currently two single lane
	 * links.
	 */
	if (sw->link_width < TB_LINK_WIDTH_DUAL)
		tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);

	/*
	 * If a device router that comes up as a symmetric link is
	 * connected deeper in the hierarchy, we transition the links
	 * above into symmetric if bandwidth allows.
	 */
	if (tb_switch_depth(sw) > 1 &&
	    tb_port_get_link_generation(up) >= 4 &&
	    up->sw->link_width == TB_LINK_WIDTH_DUAL) {
		struct tb_port *host_port;

		host_port = tb_port_at(tb_route(sw), tb->root_switch);
		tb_configure_sym(tb, host_port, up, 0, 0);
	}

	/* Set the link configured */
	tb_switch_configure_link(sw);
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment runtime PM is only supported with Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm;
	}

	upstream_port = tb_upstream_port(sw);
	tb_configure_link(port, upstream_port, sw);

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Configuration valid needs to be set after the TMU has been
	 * enabled for the upstream port of the router so we do it here.
	 */
	tb_switch_configuration_valid(sw);

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * them ourselves.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		tb_detach_bandwidth_group(src_port);
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/*
		 * If bandwidth on a link is < asym_threshold
		 * transition the link to symmetric.
		 */
		tb_configure_sym(tb, src_port, dst_port, 0, 0);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/*
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/*
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_set_link_width(port->remote->sw,
						 TB_LINK_WIDTH_SINGLE);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
{
	struct tb_tunnel *first_tunnel;
	struct tb *tb = group->tb;
	struct tb_port *in;
	int ret;

	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
	       group->index);

	first_tunnel = NULL;
	list_for_each_entry(in, &group->ports, group_list) {
		int estimated_bw, estimated_up, estimated_down;
		struct tb_tunnel *tunnel;
		struct tb_port *out;

		if (!usb4_dp_port_bandwidth_mode_enabled(in))
			continue;

		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
		if (WARN_ON(!tunnel))
			break;

		if (!first_tunnel) {
			/*
			 * Since USB3 bandwidth is shared by all DP
			 * tunnels under the host router USB4 port, even
			 * if they do not begin from the host router, we
			 * can release USB3 bandwidth just once and not
			 * for each tunnel separately.
			 */
			first_tunnel = tunnel;
			ret = tb_release_unused_usb3_bandwidth(tb,
				first_tunnel->src_port, first_tunnel->dst_port);
			if (ret) {
				tb_tunnel_warn(tunnel,
					"failed to release unused bandwidth\n");
				break;
			}
		}

		out = tunnel->dst_port;
		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
					     &estimated_down, true);
		if (ret) {
			tb_tunnel_warn(tunnel,
				"failed to re-calculate estimated bandwidth\n");
			break;
		}

		/*
		 * Estimated bandwidth includes:
		 *  - already allocated bandwidth for the DP tunnel
		 *  - available bandwidth along the path
		 *  - bandwidth allocated for USB 3.x but not used.
		 */
		tb_tunnel_dbg(tunnel,
			      "re-calculated estimated bandwidth %u/%u Mb/s\n",
			      estimated_up, estimated_down);

		if (tb_port_path_direction_downstream(in, out))
			estimated_bw = estimated_down;
		else
			estimated_bw = estimated_up;

		if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
			tb_tunnel_warn(tunnel,
				       "failed to update estimated bandwidth\n");
	}

	if (first_tunnel)
		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
					  first_tunnel->dst_port);

	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
}

static void tb_recalc_estimated_bandwidth(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int i;

	tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");

	for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
		struct tb_bandwidth_group *group = &tcm->groups[i];

		if (!list_empty(&group->ports))
			tb_recalc_estimated_bandwidth_for_group(group);
	}

	tb_dbg(tb, "bandwidth re-calculation done\n");
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP OUT in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

static bool tb_tunnel_one_dp(struct tb *tb)
{
	int available_up, available_down, ret, link_nr;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	int consumed_up, consumed_down;
	struct tb_tunnel *tunnel;

	/*
	 * Find pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
	tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

	in = NULL;
	out = NULL;
	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpin(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "DP IN in use\n");
			continue;
		}

		in = port;
		tb_port_dbg(in, "DP IN available\n");

		out = tb_find_dp_out(tb, port);
		if (out)
			break;
	}

	if (!in) {
		tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
		return false;
	}
	if (!out) {
		tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
		return false;
	}

	/*
	 * This is only applicable to links that are not bonded (so
	 * when Thunderbolt 1 hardware is involved somewhere in the
	 * topology). For these try to share the DP bandwidth between
	 * the two lanes.
	 */
	link_nr = 1;
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel)) {
			link_nr = 0;
			break;
		}
	}

	/*
	 * DP stream needs the domain to be active so runtime resume
	 * both ends of the tunnel.
	 *
	 * This should bring the routers in the middle active as well
	 * and keeps the domain from runtime suspending while the DP
	 * tunnel is active.
	 */
	pm_runtime_get_sync(&in->sw->dev);
	pm_runtime_get_sync(&out->sw->dev);

	if (tb_switch_alloc_dp_resource(in->sw, in)) {
		tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
		goto err_rpm_put;
	}

	if (!tb_attach_bandwidth_group(tcm, in, out))
		goto err_dealloc_dp;

	/* Make all unused USB3 bandwidth available for the new DP tunnel */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret) {
		tb_warn(tb, "failed to release unused bandwidth\n");
		goto err_detach_group;
	}

	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down, true);
	if (ret)
		goto err_reclaim_usb;

	tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
	       available_up, available_down);

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
				    available_down);
	if (!tunnel) {
		tb_port_dbg(out, "could not allocate DP tunnel\n");
		goto err_reclaim_usb;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(out, "DP tunnel activation failed, aborting\n");
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	tb_reclaim_usb3_bandwidth(tb, in, out);

	/*
	 * Transition the links to asymmetric if the consumption exceeds
	 * the threshold.
	 */
	if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
		tb_configure_asym(tb, in, out, consumed_up, consumed_down);

	/* Update the domain with the new bandwidth estimation */
	tb_recalc_estimated_bandwidth(tb);

	/*
	 * Now that a DP tunnel exists, change the host router's first
	 * depth children TMU mode to HiFi so that CL0s works.
	 */
	tb_increase_tmu_accuracy(tunnel);
	return true;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim_usb:
	tb_reclaim_usb3_bandwidth(tb, in, out);
err_detach_group:
	tb_detach_bandwidth_group(in);
err_dealloc_dp:
	tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
	pm_runtime_mark_last_busy(&out->sw->dev);
	pm_runtime_put_autosuspend(&out->sw->dev);
	pm_runtime_mark_last_busy(&in->sw->dev);
	pm_runtime_put_autosuspend(&in->sw->dev);

	return false;
}

static void tb_tunnel_dp(struct tb *tb)
{
	if (!tb_acpi_may_tunnel_dp()) {
		tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
		return;
	}

	while (tb_tunnel_one_dp(tb))
		;
}

static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	if (tb_port_is_dpin(port)) {
		tb_port_dbg(port, "DP IN resource unavailable\n");
		in = port;
		out = NULL;
	} else {
		tb_port_dbg(port, "DP OUT resource unavailable\n");
		in = NULL;
		out = port;
	}

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
	tb_deactivate_and_free_tunnel(tunnel);
	list_del_init(&port->list);

	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
	tb_recalc_estimated_bandwidth(tb);
	tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	if (tb_port_is_enabled(port))
		return;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);

	/* Look for suitable DP IN <-> DP OUT pairs now */
	tb_tunnel_dp(tb);
}

static void tb_disconnect_and_release_dp(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	/*
	 * Tear down all DP tunnels and release their resources. They
	 * will be re-established after resume based on plug events.
	 */
	list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	while (!list_empty(&tcm->dp_resources)) {
		struct tb_port *port;

		port = list_first_entry(&tcm->dp_resources,
					struct tb_port, list);
		list_del_init(&port->list);
	}
}

static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_tunnel *tunnel;
	struct tb_port *up;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (WARN_ON(!up))
		return -ENODEV;

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
	if (WARN_ON(!tunnel))
		return -ENODEV;

	tb_switch_xhci_disconnect(sw);

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);
	tb_tunnel_free(tunnel);
	return 0;
}

static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
	if (!up)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_pcie_down(tb_switch_parent(sw), port);
	if (!down)
		return 0;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "PCIe tunnel activation failed, aborting\n");
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/*
	 * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
	 * here.
	 */
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");

	if (tb_switch_xhci_connect(sw))
		tb_sw_warn(sw, "failed to connect xHCI\n");

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_switch *sw;
	int ret;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);

	/*
	 * When tunneling DMA paths the link should not enter CL states
	 * so disable them now.
	 */
	tb_disable_clx(sw);

	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_clx;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(nhi_port,
			     "DMA tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	mutex_unlock(&tb->lock);
	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_clx:
	tb_enable_clx(sw);
	mutex_unlock(&tb->lock);

	return ret;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}

	/*
	 * Try to re-enable CL states now, it is OK if this fails
	 * because we may still have another DMA tunnel active through
	 * the same host router USB4 downstream port.
	 */
	tb_enable_clx(sw);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
}

/* hotplug handling */

/*
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;

	/* Bring the domain back from sleep if it was suspended */
	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto put_sw;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
		       ev->route, ev->port, ev->unplug);
		goto put_sw;
	}

	pm_runtime_get_sync(&sw->dev);

	if (ev->unplug) {
		tb_retimer_remove_all(port);

		if (tb_port_has_remote(port)) {
			tb_port_dbg(port, "switch unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_tmu_disable(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_set_link_width(port->remote->sw,
						 TB_LINK_WIDTH_SINGLE);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
			/* Maybe we can create another DP tunnel */
			tb_recalc_estimated_bandwidth(tb);
			tb_tunnel_dp(tb);
		} else if (port->xdomain) {
			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

			tb_port_dbg(port, "xdomain unplugged\n");
			/*
			 * Service drivers are unbound during
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_unavailable(tb, port);
		} else if (!port->port) {
			tb_sw_dbg(sw, "xHCI disconnect request\n");
			tb_switch_xhci_disconnect(sw);
		} else {
			tb_port_dbg(port,
				    "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_dbg(port, "got plug event for connected port, ignoring\n");
	} else if (!port->port && sw->authorized) {
		tb_sw_dbg(sw, "xHCI connect request\n");
		tb_switch_xhci_connect(sw);
	} else {
		if (tb_port_is_null(port)) {
			tb_port_dbg(port, "hotplug: scanning\n");
			tb_scan_port(port);
			if (!port->remote)
				tb_port_dbg(port, "hotplug: no switch found\n");
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
			tb_dp_resource_available(tb, port);
		}
	}

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

put_sw:
	tb_switch_put(sw);
out:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}

static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
				 int *requested_down)
{
	int allocated_up, allocated_down, available_up, available_down, ret;
	int requested_up_corrected, requested_down_corrected, granularity;
	int max_up, max_down, max_up_rounded, max_down_rounded;
	struct tb *tb = tunnel->tb;
	struct tb_port *in, *out;

	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
	if (ret)
		return ret;

	in = tunnel->src_port;
	out = tunnel->dst_port;

	tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
		      allocated_up, allocated_down);

	/*
	 * If we get rounded up request from graphics side, say HBR2 x 4
	 * that is 17500 instead of 17280 (this is because of the
	 * granularity), we allow it too. Here the graphics has already
	 * negotiated with the DPRX the maximum possible rates (which is
	 * 17280 in this case).
	 *
	 * Since the link cannot go higher than 17280 we use that in our
	 * calculations but the DP IN adapter Allocated BW write must be
	 * the same value (17500) otherwise the adapter will mark it as
	 * failed for graphics.
	 */
	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
	if (ret)
		return ret;

	ret = usb4_dp_port_granularity(in);
	if (ret < 0)
		return ret;
	granularity = ret;

	max_up_rounded = roundup(max_up, granularity);
	max_down_rounded = roundup(max_down, granularity);
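
	/*
	 * For example, with a granularity of 250 Mb/s an HBR2 x 4 link
	 * (17280 Mb/s) rounds up to roundup(17280, 250) == 17500 Mb/s,
	 * which is what makes the rounded-up request above acceptable.
	 */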

	/*
	 * This will "fix" the request down to the maximum supported
	 * rate * lanes if it is at the maximum rounded up level.
	 */
	requested_up_corrected = *requested_up;
	if (requested_up_corrected == max_up_rounded)
		requested_up_corrected = max_up;
	else if (requested_up_corrected < 0)
		requested_up_corrected = 0;
	requested_down_corrected = *requested_down;
	if (requested_down_corrected == max_down_rounded)
		requested_down_corrected = max_down;
	else if (requested_down_corrected < 0)
		requested_down_corrected = 0;

	tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
		      requested_up_corrected, requested_down_corrected);

	if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
	    (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
		tb_tunnel_dbg(tunnel,
			      "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
			      requested_up_corrected, requested_down_corrected,
			      max_up_rounded, max_down_rounded);
		return -ENOBUFS;
	}

	if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
	    (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
		/*
		 * If bandwidth on a link is < asym_threshold transition
		 * the link to symmetric.
		 */
		tb_configure_sym(tb, in, out, *requested_up, *requested_down);
		/*
		 * If the requested bandwidth is less than or equal to
		 * what is currently allocated to that tunnel we simply
		 * change the reservation of the tunnel. Since all the
		 * tunnels going out from the same USB4 port are in the
		 * same group the released bandwidth will be taken into
		 * account for the other tunnels automatically below.
		 */
		return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
						 requested_down);
	}

	/*
	 * More bandwidth is requested. Release all the potential
	 * bandwidth from USB3 first.
	 */
	ret = tb_release_unused_usb3_bandwidth(tb, in, out);
	if (ret)
		return ret;

	/*
	 * Then go over all tunnels that cross the same USB4 ports (they
	 * are also in the same group but we use the same function here
	 * that we use with the normal bandwidth allocation).
	 */
	ret = tb_available_bandwidth(tb, in, out, &available_up,
				     &available_down, true);
	if (ret)
		goto reclaim;

	tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
		      available_up, available_down);

	if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
	    (*requested_down >= 0 && available_down >= requested_down_corrected)) {
		/*
		 * If bandwidth on a link is >= asym_threshold
		 * transition the link to asymmetric.
		 */
		ret = tb_configure_asym(tb, in, out, *requested_up,
					*requested_down);
		if (ret) {
			tb_configure_sym(tb, in, out, 0, 0);
			return ret;
		}

		ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
						requested_down);
		if (ret) {
			tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
			tb_configure_sym(tb, in, out, 0, 0);
		}
	} else {
		ret = -ENOBUFS;
	}

reclaim:
	tb_reclaim_usb3_bandwidth(tb, in, out);
	return ret;
}
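
/*
 * Note that a bandwidth request only ever concerns one direction at a
 * time: the handler below passes -1 for the direction that is not
 * being changed, and the "*requested_up/down >= 0" checks in
 * tb_alloc_dp_bandwidth() rely on that.
 */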
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	int requested_bw, requested_up, requested_down, ret;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;

	pm_runtime_get_sync(&tb->dev);

	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto unlock;

	sw = tb_switch_find_by_route(tb, ev->route);
	if (!sw) {
		tb_warn(tb, "bandwidth request from non-existent router %llx\n",
			ev->route);
		goto unlock;
	}

	in = &sw->ports[ev->port];
	if (!tb_port_is_dpin(in)) {
		tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
		goto put_sw;
	}

	tb_port_dbg(in, "handling bandwidth allocation request\n");

	if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
		tb_port_warn(in, "bandwidth allocation mode not enabled\n");
		goto put_sw;
	}

	ret = usb4_dp_port_requested_bandwidth(in);
	if (ret < 0) {
		if (ret == -ENODATA)
			tb_port_dbg(in, "no bandwidth request active\n");
		else
			tb_port_warn(in, "failed to read requested bandwidth\n");
		goto put_sw;
	}
	requested_bw = ret;

	tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);

	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
	if (!tunnel) {
		tb_port_warn(in, "failed to find tunnel\n");
		goto put_sw;
	}

	out = tunnel->dst_port;

	if (tb_port_path_direction_downstream(in, out)) {
		requested_up = -1;
		requested_down = requested_bw;
	} else {
		requested_up = requested_bw;
		requested_down = -1;
	}

	ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
	if (ret) {
		if (ret == -ENOBUFS)
			tb_tunnel_warn(tunnel,
				       "not enough bandwidth available\n");
		else
			tb_tunnel_warn(tunnel,
				       "failed to change bandwidth allocation\n");
	} else {
		tb_tunnel_dbg(tunnel,
			      "bandwidth allocation changed to %d/%d Mb/s\n",
			      requested_up, requested_down);

		/* Update other clients about the allocation change */
		tb_recalc_estimated_bandwidth(tb);
	}

put_sw:
	tb_switch_put(sw);
unlock:
	mutex_unlock(&tb->lock);

	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);

	kfree(ev);
}
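/*
 * Ownership note: the tb_hotplug_event is allocated by the queueing
 * helper below and freed by the handler above once it has run.
 */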
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
	queue_work(tb->wq, &ev->work);
}
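/*
 * Note (descriptive): these notifications arrive in TB_CFG_PKG_ERROR
 * packets; tb_handle_event() below routes every error packet here, and
 * only the DP bandwidth request spawns further work. The rest are
 * simply acknowledged.
 */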
static void tb_handle_notification(struct tb *tb, u64 route,
				   const struct cfg_error_pkg *error)
{
	switch (error->error) {
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		break;

	case TB_CFG_ERROR_DP_BW:
		if (tb_cfg_ack_notification(tb->ctl, route, error))
			tb_warn(tb, "could not ack notification on %llx\n",
				route);
		tb_queue_dp_bandwidth_request(tb, route, error->port);
		break;

	default:
		/* Ignore for now */
		break;
	}
}
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Acknowledges the received packet and queues it for tb_handle_hotplug().
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	u64 route = tb_cfg_get_route(&pkg->header);

	switch (type) {
	case TB_CFG_PKG_ERROR:
		tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
		return;
	case TB_CFG_PKG_EVENT:
		break;
	default:
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	cancel_delayed_work(&tcm->remove_work);
	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/*
		 * DMA tunnels require the driver to be functional so we
		 * tear them down. Other protocol tunnels can be left
		 * intact.
		 */
		if (tb_tunnel_is_dma(tunnel))
			tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
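/*
 * device_for_each_child() callback: the KOBJ_ADD uevents appear to be
 * suppressed while the topology is scanned, so this pass unsuppresses
 * them and announces every discovered switch to userspace in one go.
 */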
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		/*
		 * If we found that the switch was already setup by the
		 * boot firmware, mark it as authorized now before we
		 * send uevent to userspace.
		 */
		if (sw->boot)
			sw->authorized = 1;

		dev_set_uevent_suppress(dev, false);
		kobject_uevent(&dev->kobj, KOBJ_ADD);
		device_for_each_child(dev, NULL, tb_scan_finalize_switch);
	}

	return 0;
}
static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (IS_ERR(tb->root_switch))
		return PTR_ERR(tb->root_switch);

	/*
	 * ICM firmware upgrade needs running firmware and in native
	 * mode that is not available so disable firmware upgrade of the
	 * root switch.
	 *
	 * However, USB4 routers support NVM firmware upgrade if they
	 * implement the necessary router operations.
	 */
	tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
	/* All USB4 routers support runtime PM */
	tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/*
	 * To support highest CLx state, we set the host router's TMU
	 * to Normal mode.
	 */
	tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
	/* Enable TMU if it is off */
	tb_switch_tmu_enable(tb->root_switch);
	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	/* Find out tunnels created by the boot firmware */
	tb_discover_tunnels(tb);
	/* Add DP resources from the DP tunnels created by the boot firmware */
	tb_discover_dp_resources(tb);
	/*
	 * If the boot firmware did not create USB 3.x tunnels create them
	 * now for the whole topology.
	 */
	tb_create_usb3_tunnels(tb->root_switch);
	/* Add DP IN resources for the root switch */
	tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to userspace */
	device_for_each_child(&tb->root_switch->dev, NULL,
			      tb_scan_finalize_switch);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}
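/*
 * The ordering above matters: tunnels and DP resources created by the
 * boot firmware are discovered before any new USB 3.x tunnels are
 * created, and hotplug handling is enabled only once the initial scan
 * is complete.
 */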
static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_dbg(tb, "suspending...\n");
	tb_disconnect_and_release_dp(tb);
	tb_switch_suspend(tb->root_switch, false);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_dbg(tb, "suspend finished\n");
	return 0;
}
static void tb_restore_children(struct tb_switch *sw)
{
	struct tb_port *port;

	/* No need to restore if the router is already unplugged */
	if (sw->is_unplugged)
		return;

	if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to re-enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to restore TMU configuration\n");

	tb_switch_configuration_valid(sw);

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (port->remote) {
			tb_switch_set_link_width(port->remote->sw,
						 port->remote->sw->link_width);
			tb_switch_configure_link(port->remote->sw);

			tb_restore_children(port->remote->sw);
		} else if (port->xdomain) {
			tb_port_configure_xdomain(port, port->xdomain);
		}
	}
}
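/*
 * Note on the recursion above: each router restores CLx/TMU and
 * configures its downstream links before descending into the child
 * routers, so the tree is restored top-down from the host outwards.
 */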
static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;
	unsigned int usb3_delay = 0;
	LIST_HEAD(tunnels);

	tb_dbg(tb, "resuming...\n");

	/* remove any pci devices the firmware might have setup */
	tb_switch_reset(tb->root_switch);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	tb_restore_children(tb->root_switch);

	/*
	 * If we get here from suspend to disk the boot firmware or the
	 * restore kernel might have created tunnels of its own. Since
	 * we cannot be sure they are usable for us we find and tear
	 * them down.
	 */
	tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
	list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
		if (tb_tunnel_is_usb3(tunnel))
			usb3_delay = 500;
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_free(tunnel);
	}

	/* Re-create our tunnels now */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		/* USB3 requires delay before it can be re-activated */
		if (tb_tunnel_is_usb3(tunnel)) {
			msleep(usb3_delay);
			/* Only need to do it once */
			usb3_delay = 0;
		}
		tb_tunnel_restart(tunnel);
	}
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to get going.
		 * 100ms works for me...
		 */
		tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_dbg(tb, "resume finished\n");

	return 0;
}
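/*
 * The discovered tunnels above are torn down in reverse discovery
 * order, which (assuming discovery walks from the host outwards)
 * deactivates the deepest tunnels first.
 */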
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret = 0;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->xdomain && port->xdomain->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_xdomain_remove(port->xdomain);
			tb_port_unconfigure_xdomain(port);
			port->xdomain = NULL;
			ret++;
		} else if (port->remote) {
			ret += tb_free_unplugged_xdomains(port->remote->sw);
		}
	}

	return ret;
}
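/*
 * The return value counts the XDomains that were removed; tb_complete()
 * below uses a non-zero count as the cue to rescan in case another
 * domain was plugged in their place.
 */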
static int tb_freeze_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = false;
	return 0;
}

static int tb_thaw_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tcm->hotplug_active = true;
	return 0;
}
static void tb_complete(struct tb *tb)
{
	/*
	 * Release any unplugged XDomains and if there is a case where
	 * another domain is swapped in place of an unplugged XDomain we
	 * need to run another rescan.
	 */
	mutex_lock(&tb->lock);
	if (tb_free_unplugged_xdomains(tb->root_switch))
		tb_scan_switch(tb->root_switch);
	mutex_unlock(&tb->lock);
}
static int tb_runtime_suspend(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	mutex_lock(&tb->lock);
	tb_switch_suspend(tb->root_switch, true);
	tcm->hotplug_active = false;
	mutex_unlock(&tb->lock);

	return 0;
}
static void tb_remove_work(struct work_struct *work)
{
	struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
	struct tb *tb = tcm_to_tb(tcm);

	mutex_lock(&tb->lock);
	if (tb->root_switch) {
		tb_free_unplugged_children(tb->root_switch);
		tb_free_unplugged_xdomains(tb->root_switch);
	}
	mutex_unlock(&tb->lock);
}
static int tb_runtime_resume(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel, *n;

	mutex_lock(&tb->lock);
	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_restore_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_tunnel_restart(tunnel);
	tcm->hotplug_active = true;
	mutex_unlock(&tb->lock);

	/*
	 * Schedule cleanup of any unplugged devices. Run this in a
	 * separate thread to avoid possible deadlock if the device
	 * removal runtime resumes the unplugged device.
	 */
	queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
	return 0;
}
static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.freeze_noirq = tb_freeze_noirq,
	.thaw_noirq = tb_thaw_noirq,
	.complete = tb_complete,
	.runtime_suspend = tb_runtime_suspend,
	.runtime_resume = tb_runtime_resume,
	.handle_event = tb_handle_event,
	.disapprove_switch = tb_disconnect_pci,
	.approve_switch = tb_tunnel_pci,
	.approve_xdomain_paths = tb_approve_xdomain_paths,
	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * the NHI is resumed before the rest.
 */
static bool tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;
	bool ret;

	if (!x86_apple_machine)
		return false;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return false;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return false;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return false;

	/*
	 * For each hotplug downstream port, add a device link back to
	 * the NHI so that PCIe tunnels can be re-established after
	 * sleep.
	 */
	ret = false;
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
			ret = true;
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}

	return ret;
}
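/*
 * Entry point for the software connection manager; the NHI driver is
 * expected to call this when no firmware connection manager (ICM) is
 * handling the domain.
 */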
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
	if (!tb)
		return NULL;

	if (tb_acpi_may_tunnel_pcie())
		tb->security_level = TB_SECURITY_USER;
	else
		tb->security_level = TB_SECURITY_NOPCIE;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);
	INIT_LIST_HEAD(&tcm->dp_resources);
	INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
	tb_init_bandwidth_groups(tcm);

	tb_dbg(tb, "using software connection manager\n");

	/*
	 * Device links are needed to make sure we establish tunnels
	 * before the PCIe/USB stack is resumed, so complain here if we
	 * find them missing.
	 */
	if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
		tb_warn(tb, "device links to tunneled native ports are missing!\n");

	return tb;
}