1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - Tunneling support
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/list.h>
12
13 #include "tunnel.h"
14 #include "tb.h"
15
16 /* PCIe adapters always use HopID 8 for both directions */
17 #define TB_PCI_HOPID                    8
18
19 #define TB_PCI_PATH_DOWN                0
20 #define TB_PCI_PATH_UP                  1
21
22 /* USB3 adapters always use HopID 8 for both directions */
23 #define TB_USB3_HOPID                   8
24
25 #define TB_USB3_PATH_DOWN               0
26 #define TB_USB3_PATH_UP                 1
27
28 /* DP adapters use HopID 8 for AUX and 9 for Video */
29 #define TB_DP_AUX_TX_HOPID              8
30 #define TB_DP_AUX_RX_HOPID              8
31 #define TB_DP_VIDEO_HOPID               9
32
33 #define TB_DP_VIDEO_PATH_OUT            0
34 #define TB_DP_AUX_PATH_OUT              1
35 #define TB_DP_AUX_PATH_IN               2
36
37 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
38
39 #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
40         do {                                                            \
41                 struct tb_tunnel *__tunnel = (tunnel);                  \
42                 level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
43                       tb_route(__tunnel->src_port->sw),                 \
44                       __tunnel->src_port->port,                         \
45                       tb_route(__tunnel->dst_port->sw),                 \
46                       __tunnel->dst_port->port,                         \
47                       tb_tunnel_names[__tunnel->type],                  \
48                       ## arg);                                          \
49         } while (0)
50
51 #define tb_tunnel_WARN(tunnel, fmt, arg...) \
52         __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
53 #define tb_tunnel_warn(tunnel, fmt, arg...) \
54         __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
55 #define tb_tunnel_info(tunnel, fmt, arg...) \
56         __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
57 #define tb_tunnel_dbg(tunnel, fmt, arg...) \
58         __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
59
60 static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
61                                          enum tb_tunnel_type type)
62 {
63         struct tb_tunnel *tunnel;
64
65         tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
66         if (!tunnel)
67                 return NULL;
68
69         tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
70         if (!tunnel->paths) {
71                 tb_tunnel_free(tunnel);
72                 return NULL;
73         }
74
75         INIT_LIST_HEAD(&tunnel->list);
76         tunnel->tb = tb;
77         tunnel->npaths = npaths;
78         tunnel->type = type;
79
80         return tunnel;
81 }
82
83 static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
84 {
85         int res;
86
87         res = tb_pci_port_enable(tunnel->src_port, activate);
88         if (res)
89                 return res;
90
91         if (tb_port_is_pcie_up(tunnel->dst_port))
92                 return tb_pci_port_enable(tunnel->dst_port, activate);
93
94         return 0;
95 }
96
97 static int tb_initial_credits(const struct tb_switch *sw)
98 {
99         /* If the path is complete, sw is not NULL */
100         if (sw) {
101                 /* More credits for faster link */
102                 switch (sw->link_speed * sw->link_width) {
103                 case 40:
104                         return 32;
105                 case 20:
106                         return 24;
107                 }
108         }
109
110         return 16;
111 }
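
/*
 * Worked example of the mapping above (assuming link_speed is in Gb/s and
 * link_width is the lane count, as elsewhere in this driver): a 20 Gb/s x2
 * link gives link_speed * link_width = 40 and thus 32 initial credits, a
 * 10 Gb/s x2 or 20 Gb/s x1 link gives 20 and thus 24 credits, and anything
 * else (including an incomplete path where sw is NULL) falls back to 16.
 */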
112
113 static void tb_pci_init_path(struct tb_path *path)
114 {
115         path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
116         path->egress_shared_buffer = TB_PATH_NONE;
117         path->ingress_fc_enable = TB_PATH_ALL;
118         path->ingress_shared_buffer = TB_PATH_NONE;
119         path->priority = 3;
120         path->weight = 1;
121         path->drop_packages = 0;
122         path->hops[0].initial_credits = 7;
123         if (path->path_length > 1)
124                 path->hops[1].initial_credits =
125                         tb_initial_credits(path->hops[1].in_port->sw);
126 }
127
128 /**
129  * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
130  * @tb: Pointer to the domain structure
131  * @down: PCIe downstream adapter
132  *
133  * If @down adapter is active, follows the tunnel to the PCIe upstream
134  * adapter and back. Returns the discovered tunnel or %NULL if there was
135  * no tunnel.
136  */
137 struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
138 {
139         struct tb_tunnel *tunnel;
140         struct tb_path *path;
141
142         if (!tb_pci_port_is_enabled(down))
143                 return NULL;
144
145         tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
146         if (!tunnel)
147                 return NULL;
148
149         tunnel->activate = tb_pci_activate;
150         tunnel->src_port = down;
151
152         /*
153          * Discover both paths even if they are not complete. We will
154          * clean them up by calling tb_tunnel_deactivate() below in that
155          * case.
156          */
157         path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
158                                 &tunnel->dst_port, "PCIe Up");
159         if (!path) {
160                 /* Just disable the downstream port */
161                 tb_pci_port_enable(down, false);
162                 goto err_free;
163         }
164         tunnel->paths[TB_PCI_PATH_UP] = path;
165         tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);
166
167         path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
168                                 "PCIe Down");
169         if (!path)
170                 goto err_deactivate;
171         tunnel->paths[TB_PCI_PATH_DOWN] = path;
172         tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);
173
174         /* Validate that the tunnel is complete */
175         if (!tb_port_is_pcie_up(tunnel->dst_port)) {
176                 tb_port_warn(tunnel->dst_port,
177                              "path does not end on a PCIe adapter, cleaning up\n");
178                 goto err_deactivate;
179         }
180
181         if (down != tunnel->src_port) {
182                 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
183                 goto err_deactivate;
184         }
185
186         if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
187                 tb_tunnel_warn(tunnel,
188                                "tunnel is not fully activated, cleaning up\n");
189                 goto err_deactivate;
190         }
191
192         tb_tunnel_dbg(tunnel, "discovered\n");
193         return tunnel;
194
195 err_deactivate:
196         tb_tunnel_deactivate(tunnel);
197 err_free:
198         tb_tunnel_free(tunnel);
199
200         return NULL;
201 }
202
203 /**
204  * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
205  * @tb: Pointer to the domain structure
206  * @up: PCIe upstream adapter port
207  * @down: PCIe downstream adapter port
208  *
209  * Allocate a PCIe tunnel. The ports must be of type %TB_TYPE_PCIE_UP and
210  * %TB_TYPE_PCIE_DOWN.
211  *
212  * Return: A tb_tunnel on success or %NULL on failure.
213  */
214 struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
215                                       struct tb_port *down)
216 {
217         struct tb_tunnel *tunnel;
218         struct tb_path *path;
219
220         tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
221         if (!tunnel)
222                 return NULL;
223
224         tunnel->activate = tb_pci_activate;
225         tunnel->src_port = down;
226         tunnel->dst_port = up;
227
228         path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
229                              "PCIe Down");
230         if (!path) {
231                 tb_tunnel_free(tunnel);
232                 return NULL;
233         }
234         tb_pci_init_path(path);
235         tunnel->paths[TB_PCI_PATH_DOWN] = path;
236
237         path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
238                              "PCIe Up");
239         if (!path) {
240                 tb_tunnel_free(tunnel);
241                 return NULL;
242         }
243         tb_pci_init_path(path);
244         tunnel->paths[TB_PCI_PATH_UP] = path;
245
246         return tunnel;
247 }
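
/*
 * Hypothetical usage sketch (not built, illustrative only): a connection
 * manager would typically pair tb_tunnel_alloc_pci() with
 * tb_tunnel_activate() and free the tunnel again if activation fails,
 * roughly along these lines. The function name and error codes below are
 * assumptions, not part of this file's API.
 */
#if 0
static int example_establish_pci_tunnel(struct tb *tb, struct tb_port *up,
					 struct tb_port *down)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/* The caller would normally add the tunnel to its tunnel list here */
	return 0;
}
#endif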
248
249 static bool tb_dp_is_usb4(const struct tb_switch *sw)
250 {
251         /* Titan Ridge DP adapters need the same treatment as USB4 */
252         return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
253 }
254
255 static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
256 {
257         int timeout = 10;
258         u32 val;
259         int ret;
260
261         /* Both ends need to support this */
262         if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
263                 return 0;
264
265         ret = tb_port_read(out, &val, TB_CFG_PORT,
266                            out->cap_adap + DP_STATUS_CTRL, 1);
267         if (ret)
268                 return ret;
269
270         val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
271
272         ret = tb_port_write(out, &val, TB_CFG_PORT,
273                             out->cap_adap + DP_STATUS_CTRL, 1);
274         if (ret)
275                 return ret;
276
277         do {
278                 ret = tb_port_read(out, &val, TB_CFG_PORT,
279                                    out->cap_adap + DP_STATUS_CTRL, 1);
280                 if (ret)
281                         return ret;
282                 if (!(val & DP_STATUS_CTRL_CMHS))
283                         return 0;
284                 usleep_range(10, 100);
285         } while (timeout--);
286
287         return -ETIMEDOUT;
288 }
289
290 static inline u32 tb_dp_cap_get_rate(u32 val)
291 {
292         u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
293
294         switch (rate) {
295         case DP_COMMON_CAP_RATE_RBR:
296                 return 1620;
297         case DP_COMMON_CAP_RATE_HBR:
298                 return 2700;
299         case DP_COMMON_CAP_RATE_HBR2:
300                 return 5400;
301         case DP_COMMON_CAP_RATE_HBR3:
302                 return 8100;
303         default:
304                 return 0;
305         }
306 }
307
308 static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
309 {
310         val &= ~DP_COMMON_CAP_RATE_MASK;
311         switch (rate) {
312         default:
313                 WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
314                 fallthrough;
315         case 1620:
316                 val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
317                 break;
318         case 2700:
319                 val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
320                 break;
321         case 5400:
322                 val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
323                 break;
324         case 8100:
325                 val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
326                 break;
327         }
328         return val;
329 }
330
331 static inline u32 tb_dp_cap_get_lanes(u32 val)
332 {
333         u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
334
335         switch (lanes) {
336         case DP_COMMON_CAP_1_LANE:
337                 return 1;
338         case DP_COMMON_CAP_2_LANES:
339                 return 2;
340         case DP_COMMON_CAP_4_LANES:
341                 return 4;
342         default:
343                 return 0;
344         }
345 }
346
347 static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
348 {
349         val &= ~DP_COMMON_CAP_LANES_MASK;
350         switch (lanes) {
351         default:
352                 WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
353                      lanes);
354                 fallthrough;
355         case 1:
356                 val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
357                 break;
358         case 2:
359                 val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
360                 break;
361         case 4:
362                 val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
363                 break;
364         }
365         return val;
366 }
367
368 static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
369 {
370         /* Tunneling removes the DP 8b/10b encoding */
371         return rate * lanes * 8 / 10;
372 }
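
/*
 * Worked example of the calculation above: HBR2 (5400 Mb/s) over four
 * lanes yields 5400 * 4 * 8 / 10 = 17280 Mb/s once the 8b/10b encoding
 * overhead is removed, matching the dp_bw[] table below.
 */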
373
374 static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
375                                   u32 out_rate, u32 out_lanes, u32 *new_rate,
376                                   u32 *new_lanes)
377 {
378         static const u32 dp_bw[][2] = {
379                 /* Mb/s, lanes */
380                 { 8100, 4 }, /* 25920 Mb/s */
381                 { 5400, 4 }, /* 17280 Mb/s */
382                 { 8100, 2 }, /* 12960 Mb/s */
383                 { 2700, 4 }, /* 8640 Mb/s */
384                 { 5400, 2 }, /* 8640 Mb/s */
385                 { 8100, 1 }, /* 6480 Mb/s */
386                 { 1620, 4 }, /* 5184 Mb/s */
387                 { 5400, 1 }, /* 4320 Mb/s */
388                 { 2700, 2 }, /* 4320 Mb/s */
389                 { 1620, 2 }, /* 2592 Mb/s */
390                 { 2700, 1 }, /* 2160 Mb/s */
391                 { 1620, 1 }, /* 1296 Mb/s */
392         };
393         unsigned int i;
394
395         /*
396          * Find a combination that can fit into max_bw and does not
397          * exceed the maximum rate and lanes supported by the DP OUT and
398          * DP IN adapters.
399          */
400         for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
401                 if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
402                         continue;
403
404                 if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
405                         continue;
406
407                 if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
408                         *new_rate = dp_bw[i][0];
409                         *new_lanes = dp_bw[i][1];
410                         return 0;
411                 }
412         }
413
414         return -ENOSR;
415 }
416
417 static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
418 {
419         u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
420         struct tb_port *out = tunnel->dst_port;
421         struct tb_port *in = tunnel->src_port;
422         int ret, max_bw;
423
424         /*
425          * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
426          * newer generation hardware.
427          */
428         if (in->sw->generation < 2 || out->sw->generation < 2)
429                 return 0;
430
431         /*
432          * Perform connection manager handshake between IN and OUT ports
433          * before capabilities exchange can take place.
434          */
435         ret = tb_dp_cm_handshake(in, out);
436         if (ret)
437                 return ret;
438
439         /* Read both DP_LOCAL_CAP registers */
440         ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
441                            in->cap_adap + DP_LOCAL_CAP, 1);
442         if (ret)
443                 return ret;
444
445         ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
446                            out->cap_adap + DP_LOCAL_CAP, 1);
447         if (ret)
448                 return ret;
449
450         /* Write IN local caps to OUT remote caps */
451         ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
452                             out->cap_adap + DP_REMOTE_CAP, 1);
453         if (ret)
454                 return ret;
455
456         in_rate = tb_dp_cap_get_rate(in_dp_cap);
457         in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
458         tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
459                     in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
460
461         /*
462          * If the tunnel bandwidth is limited (max_bw is set) then see
463          * if we need to reduce bandwidth to fit there.
464          */
465         out_rate = tb_dp_cap_get_rate(out_dp_cap);
466         out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
467         bw = tb_dp_bandwidth(out_rate, out_lanes);
468         tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
469                     out_rate, out_lanes, bw);
470
471         if (in->sw->config.depth < out->sw->config.depth)
472                 max_bw = tunnel->max_down;
473         else
474                 max_bw = tunnel->max_up;
475
476         if (max_bw && bw > max_bw) {
477                 u32 new_rate, new_lanes, new_bw;
478
479                 ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
480                                              out_rate, out_lanes, &new_rate,
481                                              &new_lanes);
482                 if (ret) {
483                         tb_port_info(out, "not enough bandwidth for DP tunnel\n");
484                         return ret;
485                 }
486
487                 new_bw = tb_dp_bandwidth(new_rate, new_lanes);
488                 tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
489                             new_rate, new_lanes, new_bw);
490
491                 /*
492                  * Set new rate and number of lanes before writing it to
493                  * the IN port remote caps.
494                  */
495                 out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
496                 out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
497         }
498
499         return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
500                              in->cap_adap + DP_REMOTE_CAP, 1);
501 }
502
503 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
504 {
505         int ret;
506
507         if (active) {
508                 struct tb_path **paths;
509                 int last;
510
511                 paths = tunnel->paths;
512                 last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
513
514                 tb_dp_port_set_hops(tunnel->src_port,
515                         paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
516                         paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
517                         paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
518
519                 tb_dp_port_set_hops(tunnel->dst_port,
520                         paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
521                         paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
522                         paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
523         } else {
524                 tb_dp_port_hpd_clear(tunnel->src_port);
525                 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
526                 if (tb_port_is_dpout(tunnel->dst_port))
527                         tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
528         }
529
530         ret = tb_dp_port_enable(tunnel->src_port, active);
531         if (ret)
532                 return ret;
533
534         if (tb_port_is_dpout(tunnel->dst_port))
535                 return tb_dp_port_enable(tunnel->dst_port, active);
536
537         return 0;
538 }
539
540 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
541                                     int *consumed_down)
542 {
543         struct tb_port *in = tunnel->src_port;
544         const struct tb_switch *sw = in->sw;
545         u32 val, rate = 0, lanes = 0;
546         int ret;
547
548         if (tb_dp_is_usb4(sw)) {
549                 int timeout = 20;
550
551                 /*
552                  * Wait for DPRX done. Normally it should already be set
553                  * for an active tunnel.
554                  */
555                 do {
556                         ret = tb_port_read(in, &val, TB_CFG_PORT,
557                                            in->cap_adap + DP_COMMON_CAP, 1);
558                         if (ret)
559                                 return ret;
560
561                         if (val & DP_COMMON_CAP_DPRX_DONE) {
562                                 rate = tb_dp_cap_get_rate(val);
563                                 lanes = tb_dp_cap_get_lanes(val);
564                                 break;
565                         }
566                         msleep(250);
567                 } while (timeout--);
568
569                 if (timeout < 0)
570                         return -ETIMEDOUT;
571         } else if (sw->generation >= 2) {
572                 /*
573                  * Read from the copied remote cap so that we take into
574                  * account if capabilities were reduced during exchange.
575                  */
576                 ret = tb_port_read(in, &val, TB_CFG_PORT,
577                                    in->cap_adap + DP_REMOTE_CAP, 1);
578                 if (ret)
579                         return ret;
580
581                 rate = tb_dp_cap_get_rate(val);
582                 lanes = tb_dp_cap_get_lanes(val);
583         } else {
584                 /* No bandwidth management for legacy devices */
585                 *consumed_up = 0;
586                 *consumed_down = 0;
587                 return 0;
588         }
589
590         if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
591                 *consumed_up = 0;
592                 *consumed_down = tb_dp_bandwidth(rate, lanes);
593         } else {
594                 *consumed_up = tb_dp_bandwidth(rate, lanes);
595                 *consumed_down = 0;
596         }
597
598         return 0;
599 }
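
/*
 * Note on the direction convention above: when the DP IN adapter sits at a
 * shallower depth than the DP OUT adapter (the common case of a GPU in the
 * host driving a monitor further down the chain), the bandwidth is counted
 * as downstream consumption, otherwise as upstream.
 */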
600
601 static void tb_dp_init_aux_path(struct tb_path *path)
602 {
603         int i;
604
605         path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
606         path->egress_shared_buffer = TB_PATH_NONE;
607         path->ingress_fc_enable = TB_PATH_ALL;
608         path->ingress_shared_buffer = TB_PATH_NONE;
609         path->priority = 2;
610         path->weight = 1;
611
612         for (i = 0; i < path->path_length; i++)
613                 path->hops[i].initial_credits = 1;
614 }
615
616 static void tb_dp_init_video_path(struct tb_path *path, bool discover)
617 {
618         int i;
619
620         path->egress_fc_enable = TB_PATH_NONE;
621         path->egress_shared_buffer = TB_PATH_NONE;
622         path->ingress_fc_enable = TB_PATH_NONE;
623         path->ingress_shared_buffer = TB_PATH_NONE;
624         path->priority = 1;
625         path->weight = 1;
626
627         for (i = 0; i < path->path_length; i++) {
628                 u32 nfc_credits = path->hops[i].in_port->config.nfc_credits;
629
630                 if (discover) {
631                         path->hops[i].nfc_credits =
632                                 nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
633                 } else {
634                         u32 max_credits;
635
636                         max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
637                                 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
638                         /* Leave some credits for AUX path */
639                         path->hops[i].nfc_credits = min(max_credits - 2, 12U);
640                 }
641         }
642 }
643
644 /**
645  * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
646  * @tb: Pointer to the domain structure
647  * @in: DP in adapter
648  *
649  * If @in adapter is active, follows the tunnel to the DP out adapter
650  * and back.
651  *
652  * Return: The discovered DP tunnel or %NULL if there was no
653  * tunnel.
654  */
655 struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
656 {
657         struct tb_tunnel *tunnel;
658         struct tb_port *port;
659         struct tb_path *path;
660
661         if (!tb_dp_port_is_enabled(in))
662                 return NULL;
663
664         tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
665         if (!tunnel)
666                 return NULL;
667
668         tunnel->init = tb_dp_xchg_caps;
669         tunnel->activate = tb_dp_activate;
670         tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
671         tunnel->src_port = in;
672
673         path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
674                                 &tunnel->dst_port, "Video");
675         if (!path) {
676                 /* Just disable the DP IN port */
677                 tb_dp_port_enable(in, false);
678                 goto err_free;
679         }
680         tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
681         tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);
682
683         path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
684         if (!path)
685                 goto err_deactivate;
686         tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
687         tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
688
689         path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
690                                 &port, "AUX RX");
691         if (!path)
692                 goto err_deactivate;
693         tunnel->paths[TB_DP_AUX_PATH_IN] = path;
694         tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
695
696         /* Validate that the tunnel is complete */
697         if (!tb_port_is_dpout(tunnel->dst_port)) {
698                 tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
699                 goto err_deactivate;
700         }
701
702         if (!tb_dp_port_is_enabled(tunnel->dst_port))
703                 goto err_deactivate;
704
705         if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
706                 goto err_deactivate;
707
708         if (port != tunnel->src_port) {
709                 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
710                 goto err_deactivate;
711         }
712
713         tb_tunnel_dbg(tunnel, "discovered\n");
714         return tunnel;
715
716 err_deactivate:
717         tb_tunnel_deactivate(tunnel);
718 err_free:
719         tb_tunnel_free(tunnel);
720
721         return NULL;
722 }
723
724 /**
725  * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
726  * @tb: Pointer to the domain structure
727  * @in: DP in adapter port
728  * @out: DP out adapter port
729  * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
730  *          if not limited)
731  * @max_down: Maximum available downstream bandwidth for the DP tunnel
732  *            (%0 if not limited)
733  *
734  * Allocates a tunnel between @in and @out that is capable of tunneling
735  * Display Port traffic.
736  *
737  * Return: A tb_tunnel on success or %NULL on failure.
738  */
739 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
740                                      struct tb_port *out, int max_up,
741                                      int max_down)
742 {
743         struct tb_tunnel *tunnel;
744         struct tb_path **paths;
745         struct tb_path *path;
746
747         if (WARN_ON(!in->cap_adap || !out->cap_adap))
748                 return NULL;
749
750         tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
751         if (!tunnel)
752                 return NULL;
753
754         tunnel->init = tb_dp_xchg_caps;
755         tunnel->activate = tb_dp_activate;
756         tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
757         tunnel->src_port = in;
758         tunnel->dst_port = out;
759         tunnel->max_up = max_up;
760         tunnel->max_down = max_down;
761
762         paths = tunnel->paths;
763
764         path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
765                              1, "Video");
766         if (!path)
767                 goto err_free;
768         tb_dp_init_video_path(path, false);
769         paths[TB_DP_VIDEO_PATH_OUT] = path;
770
771         path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
772                              TB_DP_AUX_TX_HOPID, 1, "AUX TX");
773         if (!path)
774                 goto err_free;
775         tb_dp_init_aux_path(path);
776         paths[TB_DP_AUX_PATH_OUT] = path;
777
778         path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
779                              TB_DP_AUX_RX_HOPID, 1, "AUX RX");
780         if (!path)
781                 goto err_free;
782         tb_dp_init_aux_path(path);
783         paths[TB_DP_AUX_PATH_IN] = path;
784
785         return tunnel;
786
787 err_free:
788         tb_tunnel_free(tunnel);
789         return NULL;
790 }
791
792 static u32 tb_dma_credits(struct tb_port *nhi)
793 {
794         u32 max_credits;
795
796         max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
797                 ADP_CS_4_TOTAL_BUFFERS_SHIFT;
798         return min(max_credits, 13U);
799 }
800
801 static void tb_dma_init_path(struct tb_path *path, unsigned int efc, u32 credits)
802 {
803         int i;
804
805         path->egress_fc_enable = efc;
806         path->ingress_fc_enable = TB_PATH_ALL;
807         path->egress_shared_buffer = TB_PATH_NONE;
808         path->ingress_shared_buffer = TB_PATH_NONE;
809         path->priority = 5;
810         path->weight = 1;
811         path->clear_fc = true;
812
813         for (i = 0; i < path->path_length; i++)
814                 path->hops[i].initial_credits = credits;
815 }
816
817 /**
818  * tb_tunnel_alloc_dma() - allocate a DMA tunnel
819  * @tb: Pointer to the domain structure
820  * @nhi: Host controller port
821  * @dst: Destination null port which the other domain is connected to
822  * @transmit_path: HopID used for transmitting packets
823  * @transmit_ring: NHI ring number used to send packets towards the
824  *                 other domain. Set to %-1 if TX path is not needed.
825  * @receive_path: HopID used for receiving packets
826  * @receive_ring: NHI ring number used to receive packets from the
827  *                other domain. Set to %-1 if RX path is not needed.
828  *
829  * Return: A tb_tunnel on success or %NULL on failure.
830  */
831 struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
832                                       struct tb_port *dst, int transmit_path,
833                                       int transmit_ring, int receive_path,
834                                       int receive_ring)
835 {
836         struct tb_tunnel *tunnel;
837         size_t npaths = 0, i = 0;
838         struct tb_path *path;
839         u32 credits;
840
841         if (receive_ring > 0)
842                 npaths++;
843         if (transmit_ring > 0)
844                 npaths++;
845
846         if (WARN_ON(!npaths))
847                 return NULL;
848
849         tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
850         if (!tunnel)
851                 return NULL;
852
853         tunnel->src_port = nhi;
854         tunnel->dst_port = dst;
855
856         credits = tb_dma_credits(nhi);
857
858         if (receive_ring > 0) {
859                 path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
860                                      "DMA RX");
861                 if (!path) {
862                         tb_tunnel_free(tunnel);
863                         return NULL;
864                 }
865                 tb_dma_init_path(path, TB_PATH_SOURCE | TB_PATH_INTERNAL, credits);
866                 tunnel->paths[i++] = path;
867         }
868
869         if (transmit_ring > 0) {
870                 path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
871                                      "DMA TX");
872                 if (!path) {
873                         tb_tunnel_free(tunnel);
874                         return NULL;
875                 }
876                 tb_dma_init_path(path, TB_PATH_ALL, credits);
877                 tunnel->paths[i++] = path;
878         }
879
880         return tunnel;
881 }
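
/*
 * Note on the parameters above: passing -1 for receive_path/receive_ring
 * (or for the transmit pair) produces a tunnel that carries only the TX
 * (or only the RX) path, which is why npaths is derived from the ring
 * numbers before the tunnel is allocated.
 */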
882
883 /**
884  * tb_tunnel_match_dma() - Match DMA tunnel
885  * @tunnel: Tunnel to match
886  * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
887  * @transmit_ring: NHI ring number used to send packets towards the
888  *                 other domain. Pass %-1 to ignore.
889  * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
890  * @receive_ring: NHI ring number used to receive packets from the
891  *                other domain. Pass %-1 to ignore.
892  *
893  * This function can be used to match a specific DMA tunnel, if there are
894  * multiple DMA tunnels going through the same XDomain connection.
895  * Returns true if there is a match and false otherwise.
896  */
897 bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
898                          int transmit_ring, int receive_path, int receive_ring)
899 {
900         const struct tb_path *tx_path = NULL, *rx_path = NULL;
901         int i;
902
903         if (!receive_ring || !transmit_ring)
904                 return false;
905
906         for (i = 0; i < tunnel->npaths; i++) {
907                 const struct tb_path *path = tunnel->paths[i];
908
909                 if (!path)
910                         continue;
911
912                 if (tb_port_is_nhi(path->hops[0].in_port))
913                         tx_path = path;
914                 else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
915                         rx_path = path;
916         }
917
918         if (transmit_ring > 0 || transmit_path > 0) {
919                 if (!tx_path)
920                         return false;
921                 if (transmit_ring > 0 &&
922                     (tx_path->hops[0].in_hop_index != transmit_ring))
923                         return false;
924                 if (transmit_path > 0 &&
925                     (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
926                         return false;
927         }
928
929         if (receive_ring > 0 || receive_path > 0) {
930                 if (!rx_path)
931                         return false;
932                 if (receive_path > 0 &&
933                     (rx_path->hops[0].in_hop_index != receive_path))
934                         return false;
935                 if (receive_ring > 0 &&
936                     (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
937                         return false;
938         }
939
940         return true;
941 }
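
/*
 * Example of the matching semantics above: tb_tunnel_match_dma(tunnel, -1,
 * 1, -1, -1) matches any DMA tunnel whose TX path uses NHI ring 1,
 * regardless of the HopIDs or the RX side, whereas passing all four values
 * matches one fully specified tunnel. (Ring number 1 here is just an
 * illustrative value.)
 */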
942
943 static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
944 {
945         int ret, up_max_rate, down_max_rate;
946
947         ret = usb4_usb3_port_max_link_rate(up);
948         if (ret < 0)
949                 return ret;
950         up_max_rate = ret;
951
952         ret = usb4_usb3_port_max_link_rate(down);
953         if (ret < 0)
954                 return ret;
955         down_max_rate = ret;
956
957         return min(up_max_rate, down_max_rate);
958 }
959
960 static int tb_usb3_init(struct tb_tunnel *tunnel)
961 {
962         tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
963                       tunnel->allocated_up, tunnel->allocated_down);
964
965         return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
966                                                  &tunnel->allocated_up,
967                                                  &tunnel->allocated_down);
968 }
969
970 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
971 {
972         int res;
973
974         res = tb_usb3_port_enable(tunnel->src_port, activate);
975         if (res)
976                 return res;
977
978         if (tb_port_is_usb3_up(tunnel->dst_port))
979                 return tb_usb3_port_enable(tunnel->dst_port, activate);
980
981         return 0;
982 }
983
984 static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
985                 int *consumed_up, int *consumed_down)
986 {
987         int pcie_enabled = tb_acpi_may_tunnel_pcie();
988
989         /*
990          * PCIe tunneling, if enabled, affects the USB3 bandwidth so
991          * take that into account here.
992          */
993         *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
994         *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
995         return 0;
996 }
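
/*
 * Worked example of the adjustment above: with 900 Mb/s allocated in each
 * direction and PCIe tunneling enabled, the reported consumption becomes
 * 900 * (3 + 1) / 3 = 1200 Mb/s per direction; with PCIe tunneling
 * disabled it stays at 900 Mb/s. The 900 Mb/s figure is illustrative.
 */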
997
998 static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
999 {
1000         int ret;
1001
1002         ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1003                                                &tunnel->allocated_up,
1004                                                &tunnel->allocated_down);
1005         if (ret)
1006                 return ret;
1007
1008         tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1009                       tunnel->allocated_up, tunnel->allocated_down);
1010         return 0;
1011 }
1012
1013 static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1014                                                 int *available_up,
1015                                                 int *available_down)
1016 {
1017         int ret, max_rate, allocate_up, allocate_down;
1018
1019         ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
1020         if (ret < 0) {
1021                 tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
1022                 return;
1023         } else if (!ret) {
1024                 /* Use maximum link rate if the link valid flag is not set */
1025                 ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
1026                 if (ret < 0) {
1027                         tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
1028                         return;
1029                 }
1030         }
1031
1032         /*
1033          * 90% of the max rate can be allocated for isochronous
1034          * transfers.
1035          */
1036         max_rate = ret * 90 / 100;
1037
1038         /* No need to reclaim if already at maximum */
1039         if (tunnel->allocated_up >= max_rate &&
1040             tunnel->allocated_down >= max_rate)
1041                 return;
1042
1043         /* Don't go lower than what is already allocated */
1044         allocate_up = min(max_rate, *available_up);
1045         if (allocate_up < tunnel->allocated_up)
1046                 allocate_up = tunnel->allocated_up;
1047
1048         allocate_down = min(max_rate, *available_down);
1049         if (allocate_down < tunnel->allocated_down)
1050                 allocate_down = tunnel->allocated_down;
1051
1052         /* If there are no changes, there is nothing more to do */
1053         if (allocate_up == tunnel->allocated_up &&
1054             allocate_down == tunnel->allocated_down)
1055                 return;
1056
1057         ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
1058                                                 &allocate_down);
1059         if (ret) {
1060                 tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
1061                 return;
1062         }
1063
1064         tunnel->allocated_up = allocate_up;
1065         *available_up -= tunnel->allocated_up;
1066
1067         tunnel->allocated_down = allocate_down;
1068         *available_down -= tunnel->allocated_down;
1069
1070         tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1071                       tunnel->allocated_up, tunnel->allocated_down);
1072 }
1073
1074 static void tb_usb3_init_path(struct tb_path *path)
1075 {
1076         path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1077         path->egress_shared_buffer = TB_PATH_NONE;
1078         path->ingress_fc_enable = TB_PATH_ALL;
1079         path->ingress_shared_buffer = TB_PATH_NONE;
1080         path->priority = 3;
1081         path->weight = 3;
1082         path->drop_packages = 0;
1083         path->hops[0].initial_credits = 7;
1084         if (path->path_length > 1)
1085                 path->hops[1].initial_credits =
1086                         tb_initial_credits(path->hops[1].in_port->sw);
1087 }
1088
1089 /**
1090  * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1091  * @tb: Pointer to the domain structure
1092  * @down: USB3 downstream adapter
1093  *
1094  * If @down adapter is active, follows the tunnel to the USB3 upstream
1095  * adapter and back. Returns the discovered tunnel or %NULL if there was
1096  * no tunnel.
1097  */
1098 struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
1099 {
1100         struct tb_tunnel *tunnel;
1101         struct tb_path *path;
1102
1103         if (!tb_usb3_port_is_enabled(down))
1104                 return NULL;
1105
1106         tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1107         if (!tunnel)
1108                 return NULL;
1109
1110         tunnel->activate = tb_usb3_activate;
1111         tunnel->src_port = down;
1112
1113         /*
1114          * Discover both paths even if they are not complete. We will
1115          * clean them up by calling tb_tunnel_deactivate() below in that
1116          * case.
1117          */
1118         path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1119                                 &tunnel->dst_port, "USB3 Down");
1120         if (!path) {
1121                 /* Just disable the downstream port */
1122                 tb_usb3_port_enable(down, false);
1123                 goto err_free;
1124         }
1125         tunnel->paths[TB_USB3_PATH_DOWN] = path;
1126         tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1127
1128         path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
1129                                 "USB3 Up");
1130         if (!path)
1131                 goto err_deactivate;
1132         tunnel->paths[TB_USB3_PATH_UP] = path;
1133         tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1134
1135         /* Validate that the tunnel is complete */
1136         if (!tb_port_is_usb3_up(tunnel->dst_port)) {
1137                 tb_port_warn(tunnel->dst_port,
1138                              "path does not end on a USB3 adapter, cleaning up\n");
1139                 goto err_deactivate;
1140         }
1141
1142         if (down != tunnel->src_port) {
1143                 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1144                 goto err_deactivate;
1145         }
1146
1147         if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
1148                 tb_tunnel_warn(tunnel,
1149                                "tunnel is not fully activated, cleaning up\n");
1150                 goto err_deactivate;
1151         }
1152
1153         if (!tb_route(down->sw)) {
1154                 int ret;
1155
1156                 /*
1157                  * Read the initial bandwidth allocation for the first
1158                  * hop tunnel.
1159                  */
1160                 ret = usb4_usb3_port_allocated_bandwidth(down,
1161                         &tunnel->allocated_up, &tunnel->allocated_down);
1162                 if (ret)
1163                         goto err_deactivate;
1164
1165                 tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
1166                               tunnel->allocated_up, tunnel->allocated_down);
1167
1168                 tunnel->init = tb_usb3_init;
1169                 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1170                 tunnel->release_unused_bandwidth =
1171                         tb_usb3_release_unused_bandwidth;
1172                 tunnel->reclaim_available_bandwidth =
1173                         tb_usb3_reclaim_available_bandwidth;
1174         }
1175
1176         tb_tunnel_dbg(tunnel, "discovered\n");
1177         return tunnel;
1178
1179 err_deactivate:
1180         tb_tunnel_deactivate(tunnel);
1181 err_free:
1182         tb_tunnel_free(tunnel);
1183
1184         return NULL;
1185 }
1186
1187 /**
1188  * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
1189  * @tb: Pointer to the domain structure
1190  * @up: USB3 upstream adapter port
1191  * @down: USB3 downstream adapter port
1192  * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
1193  *          if not limited).
1194  * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
1195  *            (%0 if not limited).
1196  *
1197  * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
1198  * %TB_TYPE_USB3_DOWN.
1199  *
1200  * Return: A tb_tunnel on success or %NULL on failure.
1201  */
1202 struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
1203                                        struct tb_port *down, int max_up,
1204                                        int max_down)
1205 {
1206         struct tb_tunnel *tunnel;
1207         struct tb_path *path;
1208         int max_rate = 0;
1209
1210         /*
1211          * Check that we have enough bandwidth available for the new
1212          * USB3 tunnel.
1213          */
1214         if (max_up > 0 || max_down > 0) {
1215                 max_rate = tb_usb3_max_link_rate(down, up);
1216                 if (max_rate < 0)
1217                         return NULL;
1218
1219                 /* Only 90% can be allocated for USB3 isochronous transfers */
1220                 max_rate = max_rate * 90 / 100;
1221                 tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
1222                             max_rate);
1223
1224                 if (max_rate > max_up || max_rate > max_down) {
1225                         tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
1226                         return NULL;
1227                 }
1228         }
1229
1230         tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1231         if (!tunnel)
1232                 return NULL;
1233
1234         tunnel->activate = tb_usb3_activate;
1235         tunnel->src_port = down;
1236         tunnel->dst_port = up;
1237         tunnel->max_up = max_up;
1238         tunnel->max_down = max_down;
1239
1240         path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
1241                              "USB3 Down");
1242         if (!path) {
1243                 tb_tunnel_free(tunnel);
1244                 return NULL;
1245         }
1246         tb_usb3_init_path(path);
1247         tunnel->paths[TB_USB3_PATH_DOWN] = path;
1248
1249         path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
1250                              "USB3 Up");
1251         if (!path) {
1252                 tb_tunnel_free(tunnel);
1253                 return NULL;
1254         }
1255         tb_usb3_init_path(path);
1256         tunnel->paths[TB_USB3_PATH_UP] = path;
1257
1258         if (!tb_route(down->sw)) {
1259                 tunnel->allocated_up = max_rate;
1260                 tunnel->allocated_down = max_rate;
1261
1262                 tunnel->init = tb_usb3_init;
1263                 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1264                 tunnel->release_unused_bandwidth =
1265                         tb_usb3_release_unused_bandwidth;
1266                 tunnel->reclaim_available_bandwidth =
1267                         tb_usb3_reclaim_available_bandwidth;
1268         }
1269
1270         return tunnel;
1271 }
1272
1273 /**
1274  * tb_tunnel_free() - free a tunnel
1275  * @tunnel: Tunnel to be freed
1276  *
1277  * Frees a tunnel. The tunnel does not need to be deactivated.
1278  */
1279 void tb_tunnel_free(struct tb_tunnel *tunnel)
1280 {
1281         int i;
1282
1283         if (!tunnel)
1284                 return;
1285
1286         for (i = 0; i < tunnel->npaths; i++) {
1287                 if (tunnel->paths[i])
1288                         tb_path_free(tunnel->paths[i]);
1289         }
1290
1291         kfree(tunnel->paths);
1292         kfree(tunnel);
1293 }
1294
1295 /**
1296  * tb_tunnel_is_invalid() - check whether an activated path is still valid
1297  * @tunnel: Tunnel to check
1298  */
1299 bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
1300 {
1301         int i;
1302
1303         for (i = 0; i < tunnel->npaths; i++) {
1304                 WARN_ON(!tunnel->paths[i]->activated);
1305                 if (tb_path_is_invalid(tunnel->paths[i]))
1306                         return true;
1307         }
1308
1309         return false;
1310 }
1311
1312 /**
1313  * tb_tunnel_restart() - activate a tunnel after a hardware reset
1314  * @tunnel: Tunnel to restart
1315  *
1316  * Return: 0 on success and negative errno in case of failure
1317  */
1318 int tb_tunnel_restart(struct tb_tunnel *tunnel)
1319 {
1320         int res, i;
1321
1322         tb_tunnel_dbg(tunnel, "activating\n");
1323
1324         /*
1325          * Make sure all paths are properly disabled before enabling
1326          * them again.
1327          */
1328         for (i = 0; i < tunnel->npaths; i++) {
1329                 if (tunnel->paths[i]->activated) {
1330                         tb_path_deactivate(tunnel->paths[i]);
1331                         tunnel->paths[i]->activated = false;
1332                 }
1333         }
1334
1335         if (tunnel->init) {
1336                 res = tunnel->init(tunnel);
1337                 if (res)
1338                         return res;
1339         }
1340
1341         for (i = 0; i < tunnel->npaths; i++) {
1342                 res = tb_path_activate(tunnel->paths[i]);
1343                 if (res)
1344                         goto err;
1345         }
1346
1347         if (tunnel->activate) {
1348                 res = tunnel->activate(tunnel, true);
1349                 if (res)
1350                         goto err;
1351         }
1352
1353         return 0;
1354
1355 err:
1356         tb_tunnel_warn(tunnel, "activation failed\n");
1357         tb_tunnel_deactivate(tunnel);
1358         return res;
1359 }
1360
1361 /**
1362  * tb_tunnel_activate() - activate a tunnel
1363  * @tunnel: Tunnel to activate
1364  *
1365  * Return: 0 on success or an error code on failure.
1366  */
1367 int tb_tunnel_activate(struct tb_tunnel *tunnel)
1368 {
1369         int i;
1370
1371         for (i = 0; i < tunnel->npaths; i++) {
1372                 if (tunnel->paths[i]->activated) {
1373                         tb_tunnel_WARN(tunnel,
1374                                        "trying to activate an already activated tunnel\n");
1375                         return -EINVAL;
1376                 }
1377         }
1378
1379         return tb_tunnel_restart(tunnel);
1380 }
1381
1382 /**
1383  * tb_tunnel_deactivate() - deactivate a tunnel
1384  * @tunnel: Tunnel to deactivate
1385  */
1386 void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
1387 {
1388         int i;
1389
1390         tb_tunnel_dbg(tunnel, "deactivating\n");
1391
1392         if (tunnel->activate)
1393                 tunnel->activate(tunnel, false);
1394
1395         for (i = 0; i < tunnel->npaths; i++) {
1396                 if (tunnel->paths[i] && tunnel->paths[i]->activated)
1397                         tb_path_deactivate(tunnel->paths[i]);
1398         }
1399 }
1400
1401 /**
1402  * tb_tunnel_port_on_path() - Does the tunnel go through port
1403  * @tunnel: Tunnel to check
1404  * @port: Port to check
1405  *
1406  * Returns true if @tunnel goes through @port (direction does not matter),
1407  * false otherwise.
1408  */
1409 bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
1410                             const struct tb_port *port)
1411 {
1412         int i;
1413
1414         for (i = 0; i < tunnel->npaths; i++) {
1415                 if (!tunnel->paths[i])
1416                         continue;
1417
1418                 if (tb_path_port_on_path(tunnel->paths[i], port))
1419                         return true;
1420         }
1421
1422         return false;
1423 }
1424
1425 static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
1426 {
1427         int i;
1428
1429         for (i = 0; i < tunnel->npaths; i++) {
1430                 if (!tunnel->paths[i])
1431                         return false;
1432                 if (!tunnel->paths[i]->activated)
1433                         return false;
1434         }
1435
1436         return true;
1437 }
1438
1439 /**
1440  * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1441  * @tunnel: Tunnel to check
1442  * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
1443  *               Can be %NULL.
1444  * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
1445  *                 Can be %NULL.
1446  *
1447  * Stores the amount of isochronous bandwidth @tunnel consumes in
1448  * @consumed_up and @consumed_down. In case of success returns %0,
1449  * negative errno otherwise.
1450  */
1451 int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1452                                  int *consumed_down)
1453 {
1454         int up_bw = 0, down_bw = 0;
1455
1456         if (!tb_tunnel_is_active(tunnel))
1457                 goto out;
1458
1459         if (tunnel->consumed_bandwidth) {
1460                 int ret;
1461
1462                 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
1463                 if (ret)
1464                         return ret;
1465
1466                 tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
1467                               down_bw);
1468         }
1469
1470 out:
1471         if (consumed_up)
1472                 *consumed_up = up_bw;
1473         if (consumed_down)
1474                 *consumed_down = down_bw;
1475
1476         return 0;
1477 }
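
/*
 * Hypothetical usage sketch (not built, illustrative only): a connection
 * manager keeping its tunnels on a list could sum up the consumed
 * bandwidth along these lines. The function name is an assumption.
 */
#if 0
static int example_total_consumed_bandwidth(struct list_head *tunnel_list,
					    int *total_up, int *total_down)
{
	struct tb_tunnel *tunnel;
	int up, down, ret;

	*total_up = *total_down = 0;

	list_for_each_entry(tunnel, tunnel_list, list) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &up, &down);
		if (ret)
			return ret;

		*total_up += up;
		*total_down += down;
	}

	return 0;
}
#endif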
1478
1479 /**
1480  * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1481  * @tunnel: Tunnel whose unused bandwidth to release
1482  *
1483  * If the tunnel supports dynamic bandwidth management (USB3 tunnels at the
1484  * moment), this function makes it release all the unused bandwidth.
1485  *
1486  * Returns %0 in case of success and negative errno otherwise.
1487  */
1488 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
1489 {
1490         if (!tb_tunnel_is_active(tunnel))
1491                 return 0;
1492
1493         if (tunnel->release_unused_bandwidth) {
1494                 int ret;
1495
1496                 ret = tunnel->release_unused_bandwidth(tunnel);
1497                 if (ret)
1498                         return ret;
1499         }
1500
1501         return 0;
1502 }
1503
1504 /**
1505  * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1506  * @tunnel: Tunnel reclaiming available bandwidth
1507  * @available_up: Available upstream bandwidth (in Mb/s)
1508  * @available_down: Available downstream bandwidth (in Mb/s)
1509  *
1510  * Reclaims bandwidth from @available_up and @available_down and updates
1511  * the variables accordingly (e.g. decreases both according to what was
1512  * reclaimed by the tunnel). If nothing was reclaimed the values are
1513  * kept as is.
1514  */
1515 void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1516                                            int *available_up,
1517                                            int *available_down)
1518 {
1519         if (!tb_tunnel_is_active(tunnel))
1520                 return;
1521
1522         if (tunnel->reclaim_available_bandwidth)
1523                 tunnel->reclaim_available_bandwidth(tunnel, available_up,
1524                                                     available_down);
1525 }