// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)			\
	do {								\
		struct tb_tunnel *__tunnel = (tunnel);			\
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,	\
		      tb_route(__tunnel->src_port->sw),			\
		      __tunnel->src_port->port,				\
		      tb_route(__tunnel->dst_port->sw),			\
		      __tunnel->dst_port->port,				\
		      tb_tunnel_names[__tunnel->type],			\
		      ## arg);						\
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete sw is not NULL */
	if (sw) {
		/* More credits for faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}
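
/*
 * Illustrative example (not part of the driver logic): a bonded USB4
 * link running at 20 Gb/s per lane over two lanes has
 * link_speed * link_width = 20 * 2 = 40 and gets 32 initial credits;
 * a 10 Gb/s x2 or 20 Gb/s x1 link (product 20) gets 24, and anything
 * slower falls back to 16.
 */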

static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCI tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}
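
/*
 * Example usage (an illustrative sketch, not taken verbatim from a
 * connection manager; @tunnel_list is a caller-owned list head made up
 * for the example):
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *	list_add_tail(&tunnel->list, &tunnel_list);
 */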

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}
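
/*
 * For example (illustrative): HBR2 (5400 Mb/s per lane) over four
 * lanes yields 5400 * 4 * 8 / 10 = 17280 Mb/s of usable bandwidth once
 * the 8b/10b encoding overhead is removed.
 */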

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
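
/*
 * Worked example (illustrative): with max_bw = 8640 Mb/s, an HBR3 x4
 * capable DP IN and an HBR2 x4 capable DP OUT, the search above skips
 * { 8100, 4 } and { 8100, 2 } (rate above out_rate), skips { 5400, 4 }
 * (17280 Mb/s exceeds max_bw) and settles on { 2700, 4 } = 8640 Mb/s.
 */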

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (!timeout)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}
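
/*
 * Example (illustrative): if the DP IN adapter reports 14 total
 * buffers, a newly allocated video path gets min(14 - 2, 12) = 12 NFC
 * credits, leaving two buffers for the AUX paths.
 */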

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}
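
/*
 * Example (illustrative): an NHI port reporting 20 total buffers is
 * capped to 13 credits, while one reporting only 10 gets all 10.
 */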

static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %0 if TX path is not needed.
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %0 if RX path is not needed.
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	u32 credits;

	if (receive_ring)
		npaths++;
	if (transmit_ring)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	if (receive_ring) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path) {
			tb_tunnel_free(tunnel);
			return NULL;
		}
		tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
				 credits);
		tunnel->paths[i++] = path;
	}

	if (transmit_ring) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path) {
			tb_tunnel_free(tunnel);
			return NULL;
		}
		tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
		tunnel->paths[i++] = path;
	}

	return tunnel;
}
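
/*
 * Example usage (an illustrative sketch; the ring and HopID numbers
 * below are made up for the example): a bidirectional DMA tunnel to
 * the peer domain behind @dst_port using NHI ring 1 and HopID 8 in
 * both directions:
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, 1, 8, 1, 8);
 *	if (!tunnel)
 *		return -ENOMEM;
 */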

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	int pcie_enabled = tb_acpi_may_tunnel_pcie();

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take that into account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
	*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
	return 0;
}
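
/*
 * Example (illustrative): with 900 Mb/s allocated in each direction
 * and PCIe tunneling enabled, the reported consumption becomes
 * 900 * (3 + 1) / 3 = 1200 Mb/s per direction.
 */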

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
		return;
	} else if (!ret) {
		/* Use maximum link rate if the link valid is not set */
		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
		if (ret < 0) {
			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
			return;
		}
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}
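
/*
 * Example (illustrative): on a 10 Gb/s USB3 link the function above
 * caps allocations at 10000 * 90 / 100 = 9000 Mb/s, so a tunnel that
 * already holds 9000 Mb/s in both directions returns early without
 * touching *available_up or *available_down.
 */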

static void tb_usb3_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down");
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on an USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}