// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DMA_PATH_OUT			0
#define TB_DMA_PATH_IN			1

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

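/*
 * Illustrative example (not from the driver): for a tunnel whose
 * source is port 3 of the host router (route 0) and whose destination
 * is port 1 of the router at route 0x1, tb_tunnel_dbg(tunnel,
 * "discovered\n") logs a message of the form
 * "0:3 <-> 1:1 (PCI): discovered".
 */
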
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete, sw is not NULL */
	if (sw) {
		/* More credits for faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}

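/*
 * Worked example (based on the values above): a switch whose link runs
 * at 20 Gb/s with 2x width gives link_speed * link_width = 40 and thus
 * 32 initial credits; 10 Gb/s x2 or 20 Gb/s x1 gives 20 and 24
 * credits; anything slower falls back to 16.
 */
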
static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}

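/*
 * Illustrative usage sketch (not part of this file): a connection
 * manager that has resolved a PCIe adapter pair "up"/"down" and keeps
 * its tunnels on a list "tunnels" might do something like:
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *	list_add_tail(&tunnel->list, &tunnels);
 */
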
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		/* Fallthrough */
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		/* Fallthrough */
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}

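/*
 * Worked example: HBR2 at two lanes is rate = 5400 Mb/s and lanes = 2,
 * so tb_dp_bandwidth(5400, 2) = 5400 * 2 * 8 / 10 = 8640 Mb/s of
 * usable bandwidth once the 8b/10b encoding overhead is removed.
 */
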
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

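/*
 * Worked example: with max_bw = 8640 Mb/s and both adapters capable of
 * 8100 Mb/s x4, the entries 8100 x4, 5400 x4 and 8100 x2 are rejected
 * because they consume more than 8640 Mb/s, and the search settles on
 * 2700 Mb/s x4 = 8640 Mb/s, the highest combination that still fits.
 */
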
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (!timeout)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

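/*
 * Worked example (hypothetical buffer count): if the adapter reports
 * 20 total buffers, a newly allocated video path gets
 * min(20 - 2, 12) = 12 non-flow-controlled credits, leaving at least
 * two buffers free for the AUX paths.
 */
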
/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}

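/*
 * Worked example (hypothetical buffer count): if the NHI adapter
 * reports 16 total buffers, tb_dma_credits() returns min(16, 13) = 13
 * credits per hop for the DMA paths.
 */
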
static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	u32 credits;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
			 credits);
	tunnel->paths[TB_DMA_PATH_IN] = path;

	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
	tunnel->paths[TB_DMA_PATH_OUT] = path;

	return tunnel;
}

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	/*
	 * PCIe tunneling affects the USB3 bandwidth so take that into
	 * account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + 1) / 3;
	*consumed_down = tunnel->allocated_down * (3 + 1) / 3;
	return 0;
}

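/*
 * Worked example: with 900 Mb/s allocated in one direction the
 * reported consumption is 900 * (3 + 1) / 3 = 1200 Mb/s, i.e. a third
 * extra on top of the USB3 allocation is accounted for PCIe traffic
 * sharing the same link.
 */
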
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret <= 0) {
		tb_tunnel_warn(tunnel, "tunnel is not up\n");
		return;
	}
	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}

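/*
 * Worked example: if the actual USB3 link rate reads back as
 * 10000 Mb/s, at most 10000 * 90 / 100 = 9000 Mb/s can be handed to
 * the tunnel in each direction; the remaining 10% is left for
 * non-isochronous traffic.
 */
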
static void tb_usb3_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down");
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}

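/*
 * Illustrative usage sketch (not part of this file): a caller summing
 * up the bandwidth consumed on a link, assuming "tunnels" is a list of
 * struct tb_tunnel linked through ->list:
 *
 *	struct tb_tunnel *tunnel;
 *	int up, down, total_down = 0;
 *
 *	list_for_each_entry(tunnel, &tunnels, list) {
 *		if (tb_tunnel_consumed_bandwidth(tunnel, &up, &down))
 *			continue;
 *		total_down += down;
 *	}
 */
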
/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}