// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *                  events and exit if this is not set (it needs to
 *                  acquire the lock one more time). Used to drain wq
 *                  after cfg has been paused.
 */
struct tb_cm {
        struct list_head tunnel_list;
        struct list_head dp_resources;
        bool hotplug_active;
};

struct tb_hotplug_event {
        struct work_struct work;
        struct tb *tb;
        u64 route;
        u8 port;
        bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

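/*
 * Allocate a hotplug event and queue it for processing on tb->wq. If
 * the allocation fails the event is silently dropped.
 */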
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

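/*
 * Walk the ports of @sw and add each DP IN adapter that still has its
 * DP resource available to the connection manager's dp_resources list.
 */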
static void tb_add_dp_resources(struct tb_switch *sw)
{
        struct tb_cm *tcm = tb_priv(sw->tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_is_dpin(port))
                        continue;

                if (!tb_switch_query_dp_resource(sw, port))
                        continue;

                list_add_tail(&port->list, &tcm->dp_resources);
                tb_port_dbg(port, "DP IN resource available\n");
        }
}

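/*
 * Remove all DP resources belonging to @sw, and recursively to its
 * children, from the dp_resources list. Children are cleared first so
 * the resources go away bottom-up.
 */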
static void tb_remove_dp_resources(struct tb_switch *sw)
{
        struct tb_cm *tcm = tb_priv(sw->tb);
        struct tb_port *port, *tmp;

        /* Clear children resources first */
        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_remove_dp_resources(port->remote->sw);
        }

        list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
                if (port->sw == sw) {
                        tb_port_dbg(port, "DP IN resource unavailable\n");
                        list_del_init(&port->list);
                }
        }
}

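/*
 * Discover DP, PCIe and USB3 tunnels set up by the boot firmware,
 * walking @sw and all switches below it. Switches on the path of a
 * discovered PCIe tunnel are marked as boot configured so they can be
 * authorized automatically later.
 */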
static void tb_discover_tunnels(struct tb_switch *sw)
{
        struct tb *tb = sw->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                struct tb_tunnel *tunnel = NULL;

                switch (port->config.type) {
                case TB_TYPE_DP_HDMI_IN:
                        tunnel = tb_tunnel_discover_dp(tb, port);
                        break;

                case TB_TYPE_PCIE_DOWN:
                        tunnel = tb_tunnel_discover_pci(tb, port);
                        break;

                case TB_TYPE_USB3_DOWN:
                        tunnel = tb_tunnel_discover_usb3(tb, port);
                        break;

                default:
                        break;
                }

                if (!tunnel)
                        continue;

                if (tb_tunnel_is_pci(tunnel)) {
                        struct tb_switch *parent = tunnel->dst_port->sw;

                        while (parent != tunnel->src_port->sw) {
                                parent->boot = true;
                                parent = tb_switch_parent(parent);
                        }
                }

                list_add_tail(&tunnel->list, &tcm->tunnel_list);
        }

        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_discover_tunnels(port->remote->sw);
        }
}

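/*
 * The switch behind @port could not be enumerated so it probably
 * belongs to another domain. Create and register an XDomain connection
 * for it unless one already exists for the route.
 */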
static void tb_scan_xdomain(struct tb_port *port)
{
        struct tb_switch *sw = port->sw;
        struct tb *tb = sw->tb;
        struct tb_xdomain *xd;
        u64 route;

        route = tb_downstream_route(port);
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                tb_xdomain_put(xd);
                return;
        }

        xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
                              NULL);
        if (xd) {
                tb_port_at(route, sw)->xdomain = xd;
                tb_xdomain_add(xd);
        }
}

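/*
 * Enable the TMU (Time Management Unit) of @sw unless it is already
 * running in the correct mode. The unit is disabled and the time
 * posted before it is re-enabled.
 */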
static int tb_enable_tmu(struct tb_switch *sw)
{
        int ret;

        /* If it is already enabled in correct mode, don't touch it */
        if (tb_switch_tmu_is_enabled(sw))
                return 0;

        ret = tb_switch_tmu_disable(sw);
        if (ret)
                return ret;

        ret = tb_switch_tmu_post_time(sw);
        if (ret)
                return ret;

        return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
                                           enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->config.type != type)
                        continue;
                if (!port->cap_adap)
                        continue;
                if (tb_port_is_enabled(port))
                        continue;
                return port;
        }
        return NULL;
}

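/*
 * Return the USB3 downstream adapter of @sw that should be used with
 * @port. If the mapped adapter is unsuitable, or there is none, fall
 * back to the first unused USB3 downstream adapter.
 */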
static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
                                        const struct tb_port *port)
{
        struct tb_port *down;

        down = usb4_switch_map_usb3_down(sw, port);
        if (down) {
                if (WARN_ON(!tb_port_is_usb3_down(down)))
                        goto out;
                if (WARN_ON(tb_usb3_port_is_enabled(down)))
                        goto out;

                return down;
        }

out:
        return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN);
}

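/*
 * Set up a USB3 tunnel between @sw and its parent switch. Tunnels are
 * chained hop by hop so the parent's own upstream USB3 adapter must be
 * enabled before a new tunnel below it makes sense.
 */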
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent = tb_switch_parent(sw);
        struct tb_port *up, *down, *port;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;

        up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
        if (!up)
                return 0;

        /*
         * Look up available down port. Since we are chaining it should
         * be found right above this switch.
         */
        port = tb_port_at(tb_route(sw), parent);
        down = tb_find_usb3_down(parent, port);
        if (!down)
                return 0;

        if (tb_route(parent)) {
                struct tb_port *parent_up;
                /*
                 * Check first that the parent switch has its upstream USB3
                 * port enabled. Otherwise the chain is not complete and
                 * there is no point setting up a new tunnel.
                 */
                parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
                if (!parent_up || !tb_port_is_enabled(parent_up))
                        return 0;
        }

        tunnel = tb_tunnel_alloc_usb3(tb, up, down);
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(up,
                             "USB3 tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

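/*
 * Create USB3 tunnels recursively for @sw and every switch below it.
 * Used during driver load when the boot firmware has not created the
 * tunnels already.
 */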
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret;

        if (tb_route(sw)) {
                ret = tb_tunnel_usb3(sw->tb, sw);
                if (ret)
                        return ret;
        }

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;
                ret = tb_create_usb3_tunnels(port->remote->sw);
                if (ret)
                        return ret;
        }

        return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch to scan
 */
static void tb_scan_switch(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port)
                tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 */
static void tb_scan_port(struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(port->sw->tb);
        struct tb_port *upstream_port;
        struct tb_switch *sw;

        if (tb_is_upstream_port(port))
                return;

        if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
            !tb_dp_port_is_enabled(port)) {
                tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
                tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
                                 false);
                return;
        }

        if (port->config.type != TB_TYPE_PORT)
                return;
        if (port->dual_link_port && port->link_nr)
                return; /*
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
        if (tb_wait_for_port(port, false) <= 0)
                return;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
                return;
        }
        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (IS_ERR(sw)) {
                /*
                 * If there is an error accessing the connected switch
                 * it may be connected to another domain. Also we allow
                 * the other domain to be connected to a max depth switch.
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
                return;
        }

        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
                return;
        }

        /*
         * If there was previously another domain connected remove it
         * first.
         */
        if (port->xdomain) {
                tb_xdomain_remove(port->xdomain);
                port->xdomain = NULL;
        }

        /*
         * Do not send uevents until we have discovered all existing
         * tunnels and know which switches were authorized already by
         * the boot firmware.
         */
        if (!tcm->hotplug_active)
                dev_set_uevent_suppress(&sw->dev, true);

        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
                return;
        }

        /* Link the switches using both links if available */
        upstream_port = tb_upstream_port(sw);
        port->remote = upstream_port;
        upstream_port->remote = port;
        if (port->dual_link_port && upstream_port->dual_link_port) {
                port->dual_link_port->remote = upstream_port->dual_link_port;
                upstream_port->dual_link_port->remote = port->dual_link_port;
        }

        /* Enable lane bonding if supported */
        if (tb_switch_lane_bonding_enable(sw))
                tb_sw_warn(sw, "failed to enable lane bonding\n");

        if (tb_enable_tmu(sw))
                tb_sw_warn(sw, "failed to enable TMU\n");

        /*
         * Create USB 3.x tunnels only when the switch is plugged to the
         * domain. This is because we scan the domain also during discovery
         * and want to discover existing USB 3.x tunnels before we create
         * any new.
         */
        if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
                tb_sw_warn(sw, "USB3 tunnel creation failed\n");

        tb_scan_switch(sw);
}

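/*
 * Return the first active tunnel of @type that matches the given
 * source or destination port. A NULL port is ignored in the match.
 */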
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
                                        struct tb_port *src_port,
                                        struct tb_port *dst_port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                if (tunnel->type == type &&
                    ((src_port && src_port == tunnel->src_port) ||
                     (dst_port && dst_port == tunnel->dst_port))) {
                        return tunnel;
                }
        }

        return NULL;
}

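/*
 * Deactivate and free @tunnel (NULL is allowed). For DP tunnels this
 * also releases the DP IN resource allocated when the tunnel was set
 * up.
 */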
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
        if (!tunnel)
                return;

        tb_tunnel_deactivate(tunnel);
        list_del(&tunnel->list);

        /*
         * In case of DP tunnel make sure the DP IN resource is deallocated
         * properly.
         */
        if (tb_tunnel_is_dp(tunnel)) {
                struct tb_port *in = tunnel->src_port;

                tb_switch_dealloc_dp_resource(in->sw, in);
        }

        tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 * @tb: Domain whose tunnels to check
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel))
                        tb_deactivate_and_free_tunnel(tunnel);
        }
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 * @sw: Switch whose children to check
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_remove_dp_resources(port->remote->sw);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else {
                        tb_free_unplugged_children(port->remote->sw);
                }
        }
}

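/*
 * Return the PCIe downstream adapter of @sw that pairs with @port so
 * that devices are plugged consistently into the same PCIe hierarchy.
 * If there is no fixed mapping, fall back to the first unused PCIe
 * downstream adapter.
 */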
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
                                         const struct tb_port *port)
{
        struct tb_port *down = NULL;

        /*
         * To keep plugging devices consistently in the same PCIe
         * hierarchy, do mapping here for switch downstream PCIe ports.
         */
        if (tb_switch_is_usb4(sw)) {
                down = usb4_switch_map_pcie_down(sw, port);
        } else if (!tb_route(sw)) {
                int phy_port = tb_phy_port_from_link(port->port);
                int index;

                /*
                 * Hard-coded Thunderbolt port to PCIe down port mapping
                 * per controller.
                 */
                if (tb_switch_is_cactus_ridge(sw) ||
                    tb_switch_is_alpine_ridge(sw))
                        index = !phy_port ? 6 : 7;
                else if (tb_switch_is_falcon_ridge(sw))
                        index = !phy_port ? 6 : 8;
                else if (tb_switch_is_titan_ridge(sw))
                        index = !phy_port ? 8 : 9;
                else
                        goto out;

                /* Validate the hard-coding */
                if (WARN_ON(index > sw->config.max_port_number))
                        goto out;

                down = &sw->ports[index];
        }

        if (down) {
                if (WARN_ON(!tb_port_is_pcie_down(down)))
                        goto out;
                if (WARN_ON(tb_pci_port_is_enabled(down)))
                        goto out;

                return down;
        }

out:
        return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

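/*
 * Estimate the bandwidth in Mb/s available for a new DP tunnel between
 * @in and @out. Each link on the path contributes its raw bandwidth
 * minus a 10% guard band and minus whatever the tunnels crossing that
 * switch already consume. The minimum over the path is returned,
 * starting from a 40000 Mb/s upper bound.
 */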
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
                           struct tb_port *out)
{
        struct tb_switch *sw = out->sw;
        struct tb_tunnel *tunnel;
        int bw, available_bw = 40000;

        while (sw && sw != in->sw) {
                bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
                /* Leave 10% guard band */
                bw -= bw / 10;

                /*
                 * Check for any active DP tunnels that go through this
                 * switch and reduce their consumed bandwidth from
                 * available.
                 */
                list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                        int consumed_bw;

                        if (!tb_tunnel_switch_on_path(tunnel, sw))
                                continue;

                        consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
                        if (consumed_bw < 0)
                                return consumed_bw;

                        bw -= consumed_bw;
                }

                if (bw < available_bw)
                        available_bw = bw;

                sw = tb_switch_parent(sw);
        }

        return available_bw;
}

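/*
 * Pick the first inactive DP IN and DP OUT adapter pair from the
 * dp_resources list, allocate the DP IN resource and establish a DP
 * tunnel between the two using the available bandwidth.
 */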
static void tb_tunnel_dp(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port, *in, *out;
        struct tb_tunnel *tunnel;
        int available_bw;

        /*
         * Find pair of inactive DP IN and DP OUT adapters and then
         * establish a DP tunnel between them.
         */
        tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

        in = NULL;
        out = NULL;
        list_for_each_entry(port, &tcm->dp_resources, list) {
                if (tb_port_is_enabled(port)) {
                        tb_port_dbg(port, "in use\n");
                        continue;
                }

                tb_port_dbg(port, "available\n");

                if (!in && tb_port_is_dpin(port))
                        in = port;
                else if (!out && tb_port_is_dpout(port))
                        out = port;
        }

        if (!in) {
                tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
                return;
        }
        if (!out) {
                tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
                return;
        }

        if (tb_switch_alloc_dp_resource(in->sw, in)) {
                tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
                return;
        }

        /* Calculate available bandwidth between in and out */
        available_bw = tb_available_bw(tcm, in, out);
        if (available_bw < 0) {
                tb_warn(tb, "failed to determine available bandwidth\n");
                /* Release the DP IN resource since we are not tunneling */
                goto dealloc_dp;
        }

        tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
               available_bw);

        tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
        if (!tunnel) {
                tb_port_dbg(out, "could not allocate DP tunnel\n");
                goto dealloc_dp;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(out, "DP tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                goto dealloc_dp;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return;

dealloc_dp:
        tb_switch_dealloc_dp_resource(in->sw, in);
}

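/*
 * A DP adapter resource was lost: tear down the DP tunnel using @port,
 * drop the port from the dp_resources list and see if a new tunnel can
 * be created from the remaining resources.
 */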
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
        struct tb_port *in, *out;
        struct tb_tunnel *tunnel;

        if (tb_port_is_dpin(port)) {
                tb_port_dbg(port, "DP IN resource unavailable\n");
                in = port;
                out = NULL;
        } else {
                tb_port_dbg(port, "DP OUT resource unavailable\n");
                in = NULL;
                out = port;
        }

        tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
        tb_deactivate_and_free_tunnel(tunnel);
        list_del_init(&port->list);

        /*
         * See if there is another DP OUT port that can be used to
         * create another tunnel.
         */
        tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *p;

        if (tb_port_is_enabled(port))
                return;

        list_for_each_entry(p, &tcm->dp_resources, list) {
                if (p == port)
                        return;
        }

        tb_port_dbg(port, "DP %s resource available\n",
                    tb_port_is_dpin(port) ? "IN" : "OUT");
        list_add_tail(&port->list, &tcm->dp_resources);

        /* Look for suitable DP IN <-> DP OUT pairs now */
        tb_tunnel_dp(tb);
}

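/*
 * Set up a PCIe tunnel between the PCIe upstream adapter of @sw and
 * the matching PCIe downstream adapter on the parent switch. This is
 * the ->approve_switch hook of the connection manager.
 */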
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
        struct tb_port *up, *down, *port;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *parent_sw;
        struct tb_tunnel *tunnel;

        up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
        if (!up)
                return 0;

        /*
         * Look up available down port. Since we are chaining it should
         * be found right above this switch.
         */
        parent_sw = tb_to_switch(sw->dev.parent);
        port = tb_port_at(tb_route(sw), parent_sw);
        down = tb_find_pcie_down(parent_sw, port);
        if (!down)
                return 0;

        tunnel = tb_tunnel_alloc_pci(tb, up, down);
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(up,
                             "PCIe tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *nhi_port, *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);
        nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

        mutex_lock(&tb->lock);
        tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
                                     xd->transmit_path, xd->receive_ring,
                                     xd->receive_path);
        if (!tunnel) {
                mutex_unlock(&tb->lock);
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(nhi_port,
                             "DMA tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                mutex_unlock(&tb->lock);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        mutex_unlock(&tb->lock);
        return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_port *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);

        /*
         * It is possible that the tunnel was already torn down (in
         * case of cable disconnect) so it is fine if we cannot find it
         * here anymore.
         */
        tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
        tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!xd->is_unplugged) {
                mutex_lock(&tb->lock);
                __tb_disconnect_xdomain_paths(tb, xd);
                mutex_unlock(&tb->lock);
        }
        return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 * @work: Work item of the queued hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;

        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = tb_switch_find_by_route(tb, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                       ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        if (ev->unplug) {
                if (tb_port_has_remote(port)) {
                        tb_port_dbg(port, "switch unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_remove_dp_resources(port->remote->sw);
                        tb_switch_tmu_disable(port->remote->sw);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                        /* Maybe we can create another DP tunnel */
                        tb_tunnel_dp(tb);
                } else if (port->xdomain) {
                        struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

                        tb_port_dbg(port, "xdomain unplugged\n");
                        /*
                         * Service drivers are unbound during
                         * tb_xdomain_remove() so setting XDomain as
                         * unplugged here prevents deadlock if they call
                         * tb_xdomain_disable_paths(). We will tear down
                         * the path below.
                         */
                        xd->is_unplugged = true;
                        tb_xdomain_remove(xd);
                        port->xdomain = NULL;
                        __tb_disconnect_xdomain_paths(tb, xd);
                        tb_xdomain_put(xd);
                } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
                        tb_dp_resource_unavailable(tb, port);
                } else {
                        tb_port_dbg(port,
                                   "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_dbg(port, "got plug event for connected port, ignoring\n");
        } else {
                if (tb_port_is_null(port)) {
                        tb_port_dbg(port, "hotplug: scanning\n");
                        tb_scan_port(port);
                        if (!port->remote)
                                tb_port_dbg(port, "hotplug: no switch found\n");
                } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
                        tb_dp_resource_available(tb, port);
                }
        }

put_sw:
        tb_switch_put(sw);
out:
        mutex_unlock(&tb->lock);
        kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /*
                 * DMA tunnels require the driver to be functional so we
                 * tear them down. Other protocol tunnels can be left
                 * intact.
                 */
                if (tb_tunnel_is_dma(tunnel))
                        tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
        if (tb_is_switch(dev)) {
                struct tb_switch *sw = tb_to_switch(dev);

                /*
                 * If we found that the switch was already set up by the
                 * boot firmware, mark it as authorized now before we
                 * send uevent to userspace.
                 */
                if (sw->boot)
                        sw->authorized = 1;

                dev_set_uevent_suppress(dev, false);
                kobject_uevent(&dev->kobj, KOBJ_ADD);
                device_for_each_child(dev, NULL, tb_scan_finalize_switch);
        }

        return 0;
}

static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        /*
         * ICM firmware upgrade needs running firmware and in native
         * mode that is not available so disable firmware upgrade of the
         * root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Enable TMU if it is off */
        tb_switch_tmu_enable(tb->root_switch);
        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        /* Find out tunnels created by the boot firmware */
        tb_discover_tunnels(tb->root_switch);
        /*
         * If the boot firmware did not create USB 3.x tunnels, create
         * them now for the whole topology.
         */
        tb_create_usb3_tunnels(tb->root_switch);
        /* Add DP IN resources for the root switch */
        tb_add_dp_resources(tb->root_switch);
        /* Make the discovered switches available to the userspace */
        device_for_each_child(&tb->root_switch->dev, NULL,
                              tb_scan_finalize_switch);

        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_dbg(tb, "suspending...\n");
        tb_switch_suspend(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_dbg(tb, "suspend finished\n");

        return 0;
}

static void tb_restore_children(struct tb_switch *sw)
{
        struct tb_port *port;

        if (tb_enable_tmu(sw))
                tb_sw_warn(sw, "failed to restore TMU configuration\n");

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (tb_switch_lane_bonding_enable(port->remote->sw))
                        dev_warn(&sw->dev, "failed to restore lane bonding\n");

                tb_restore_children(port->remote->sw);
        }
}

static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        tb_dbg(tb, "resuming...\n");

        /* Remove any PCIe devices the firmware might have set up */
        tb_switch_reset(tb, 0);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        tb_restore_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_tunnel_restart(tunnel);
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * the pcie links need some time to get going.
                 * 100ms works for me...
                 */
                tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        tb_dbg(tb, "resume finished\n");

        return 0;
}

static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret = 0;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        ret++;
                } else if (port->remote) {
                        ret += tb_free_unplugged_xdomains(port->remote->sw);
                }
        }

        return ret;
}

static void tb_complete(struct tb *tb)
{
        /*
         * Release any unplugged XDomains. If another domain was
         * swapped in place of an unplugged XDomain we need to run
         * another rescan.
         */
        mutex_lock(&tb->lock);
        if (tb_free_unplugged_xdomains(tb->root_switch))
                tb_scan_switch(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .complete = tb_complete,
        .handle_event = tb_handle_event,
        .approve_switch = tb_tunnel_pci,
        .approve_xdomain_paths = tb_approve_xdomain_paths,
        .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, sizeof(*tcm));
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_USER;
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);
        INIT_LIST_HEAD(&tcm->dp_resources);

        return tb;
}