// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *                  events and exit if this is not set (it needs to
 *                  acquire the lock one more time). Used to drain wq
 *                  after cfg has been paused.
 */
struct tb_cm {
        struct list_head tunnel_list;
        struct list_head dp_resources;
        bool hotplug_active;
};

struct tb_hotplug_event {
        struct work_struct work;
        struct tb *tb;
        u64 route;
        u8 port;
        bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

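/*
 * tb_queue_hotplug() - defer a hotplug event to the domain workqueue
 *
 * Allocates a struct tb_hotplug_event and schedules tb_handle_hotplug()
 * on tb->wq. If the allocation fails the event is silently dropped.
 */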
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        INIT_WORK(&ev->work, tb_handle_hotplug);
        queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

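/*
 * tb_add_dp_resources() - register the available DP IN adapters of @sw
 * with the connection manager so they can later be paired with DP OUT
 * adapters for tunneling.
 */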
static void tb_add_dp_resources(struct tb_switch *sw)
{
        struct tb_cm *tcm = tb_priv(sw->tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_is_dpin(port))
                        continue;

                if (!tb_switch_query_dp_resource(sw, port))
                        continue;

                list_add_tail(&port->list, &tcm->dp_resources);
                tb_port_dbg(port, "DP IN resource available\n");
        }
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
        struct tb_cm *tcm = tb_priv(sw->tb);
        struct tb_port *port, *tmp;

        /* Clear children resources first */
        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_remove_dp_resources(port->remote->sw);
        }

        list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
                if (port->sw == sw) {
                        tb_port_dbg(port, "DP %s resource unavailable\n",
                                    tb_port_is_dpin(port) ? "IN" : "OUT");
                        list_del_init(&port->list);
                }
        }
}

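/*
 * tb_discover_tunnels() - pick up tunnels created by the boot firmware
 *
 * Walks the switch tree below @sw and adds any existing DP and PCIe
 * tunnels to the connection manager's tunnel list. Switches along a
 * discovered PCIe tunnel are marked as set up by the boot firmware
 * (sw->boot).
 */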
static void tb_discover_tunnels(struct tb_switch *sw)
{
        struct tb *tb = sw->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                struct tb_tunnel *tunnel = NULL;

                switch (port->config.type) {
                case TB_TYPE_DP_HDMI_IN:
                        tunnel = tb_tunnel_discover_dp(tb, port);
                        break;

                case TB_TYPE_PCIE_DOWN:
                        tunnel = tb_tunnel_discover_pci(tb, port);
                        break;

                default:
                        break;
                }

                if (!tunnel)
                        continue;

                if (tb_tunnel_is_pci(tunnel)) {
                        struct tb_switch *parent = tunnel->dst_port->sw;

                        while (parent != tunnel->src_port->sw) {
                                parent->boot = true;
                                parent = tb_switch_parent(parent);
                        }
                }

                list_add_tail(&tunnel->list, &tcm->tunnel_list);
        }

        tb_switch_for_each_port(sw, port) {
                if (tb_port_has_remote(port))
                        tb_discover_tunnels(port->remote->sw);
        }
}

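/*
 * tb_scan_xdomain() - check whether the link behind @port leads to
 * another Thunderbolt domain and, if no XDomain is registered for that
 * route yet, allocate and add one.
 */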
static void tb_scan_xdomain(struct tb_port *port)
{
        struct tb_switch *sw = port->sw;
        struct tb *tb = sw->tb;
        struct tb_xdomain *xd;
        u64 route;

        route = tb_downstream_route(port);
        xd = tb_xdomain_find_by_route(tb, route);
        if (xd) {
                tb_xdomain_put(xd);
                return;
        }

        xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
                              NULL);
        if (xd) {
                tb_port_at(route, sw)->xdomain = xd;
                tb_xdomain_add(xd);
        }
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port)
                tb_scan_port(port);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(port->sw->tb);
        struct tb_port *upstream_port;
        struct tb_switch *sw;

        if (tb_is_upstream_port(port))
                return;

        if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
            !tb_dp_port_is_enabled(port)) {
                tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
                tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
                                 false);
                return;
        }

        if (port->config.type != TB_TYPE_PORT)
                return;
        if (port->dual_link_port && port->link_nr)
                return; /*
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
        if (tb_wait_for_port(port, false) <= 0)
                return;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
                return;
        }
        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (IS_ERR(sw)) {
                /*
                 * If there is an error accessing the connected switch
                 * it may be connected to another domain. Also we allow
                 * the other domain to be connected to a max depth switch.
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
                return;
        }

        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
                return;
        }

        /*
         * If there was previously another domain connected remove it
         * first.
         */
        if (port->xdomain) {
                tb_xdomain_remove(port->xdomain);
                port->xdomain = NULL;
        }

        /*
         * Do not send uevents until we have discovered all existing
         * tunnels and know which switches were authorized already by
         * the boot firmware.
         */
        if (!tcm->hotplug_active)
                dev_set_uevent_suppress(&sw->dev, true);

        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
                return;
        }

        /* Link the switches using both links if available */
        upstream_port = tb_upstream_port(sw);
        port->remote = upstream_port;
        upstream_port->remote = port;
        if (port->dual_link_port && upstream_port->dual_link_port) {
                port->dual_link_port->remote = upstream_port->dual_link_port;
                upstream_port->dual_link_port->remote = port->dual_link_port;
        }

        /* Enable lane bonding if supported */
        if (tb_switch_lane_bonding_enable(sw))
                tb_sw_warn(sw, "failed to enable lane bonding\n");

        tb_scan_switch(sw);
}

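/*
 * tb_find_tunnel() - return the first active tunnel of @type whose
 * source matches @src_port or whose destination matches @dst_port
 * (a NULL port is simply not matched against), or NULL if none exists.
 */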
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
                                        struct tb_port *src_port,
                                        struct tb_port *dst_port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;

        list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                if (tunnel->type == type &&
                    ((src_port && src_port == tunnel->src_port) ||
                     (dst_port && dst_port == tunnel->dst_port))) {
                        return tunnel;
                }
        }

        return NULL;
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
        if (!tunnel)
                return;

        tb_tunnel_deactivate(tunnel);
        list_del(&tunnel->list);

        /*
         * In case of DP tunnel make sure the DP IN resource is deallocated
         * properly.
         */
        if (tb_tunnel_is_dp(tunnel)) {
                struct tb_port *in = tunnel->src_port;

                tb_switch_dealloc_dp_resource(in->sw, in);
        }

        tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                if (tb_tunnel_is_invalid(tunnel))
                        tb_deactivate_and_free_tunnel(tunnel);
        }
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (port->remote->sw->is_unplugged) {
                        tb_remove_dp_resources(port->remote->sw);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                } else {
                        tb_free_unplugged_children(port->remote->sw);
                }
        }
}

/**
 * tb_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
static struct tb_port *tb_find_port(struct tb_switch *sw,
                                    enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (port->config.type == type)
                        return port;
        }

        return NULL;
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
                                           enum tb_port_type type)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->config.type != type)
                        continue;
                if (!port->cap_adap)
                        continue;
                if (tb_port_is_enabled(port))
                        continue;
                return port;
        }
        return NULL;
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
                                         const struct tb_port *port)
{
        /*
         * To keep plugging devices consistently in the same PCIe
         * hierarchy, do mapping here for root switch downstream PCIe
         * ports.
         */
        if (!tb_route(sw)) {
                int phy_port = tb_phy_port_from_link(port->port);
                int index;

                /*
                 * Hard-coded Thunderbolt port to PCIe down port mapping
                 * per controller.
                 */
                if (tb_switch_is_cactus_ridge(sw) ||
                    tb_switch_is_alpine_ridge(sw))
                        index = !phy_port ? 6 : 7;
                else if (tb_switch_is_falcon_ridge(sw))
                        index = !phy_port ? 6 : 8;
                else if (tb_switch_is_titan_ridge(sw))
                        index = !phy_port ? 8 : 9;
                else
                        goto out;

                /* Validate the hard-coding */
                if (WARN_ON(index > sw->config.max_port_number))
                        goto out;
                if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
                        goto out;
                if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
                        goto out;

                return &sw->ports[index];
        }

out:
        return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

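/*
 * tb_available_bw() - estimate bandwidth available for a new DP tunnel
 *
 * Walks the path from @out towards @in. For each switch on the way the
 * link bandwidth (link_speed * link_width) minus a 10% guard band and
 * minus the bandwidth consumed by existing DP tunnels crossing that
 * switch is computed. Returns the minimum over the path, capped at
 * 40000 Mb/s, or a negative errno on failure.
 */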
static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
                           struct tb_port *out)
{
        struct tb_switch *sw = out->sw;
        struct tb_tunnel *tunnel;
        int bw, available_bw = 40000;

        while (sw && sw != in->sw) {
                bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
                /* Leave 10% guard band */
                bw -= bw / 10;

                /*
                 * Check for any active DP tunnels that go through this
                 * switch and subtract their consumed bandwidth from
                 * what is available.
                 */
                list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
                        int consumed_bw;

                        if (!tb_tunnel_switch_on_path(tunnel, sw))
                                continue;

                        consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
                        if (consumed_bw < 0)
                                return consumed_bw;

                        bw -= consumed_bw;
                }

                if (bw < available_bw)
                        available_bw = bw;

                sw = tb_switch_parent(sw);
        }

        return available_bw;
}

static void tb_tunnel_dp(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *port, *in, *out;
        struct tb_tunnel *tunnel;
        int available_bw;

        /*
         * Find pair of inactive DP IN and DP OUT adapters and then
         * establish a DP tunnel between them.
         */
        tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");

        in = NULL;
        out = NULL;
        list_for_each_entry(port, &tcm->dp_resources, list) {
                if (tb_port_is_enabled(port)) {
                        tb_port_dbg(port, "in use\n");
                        continue;
                }

                tb_port_dbg(port, "available\n");

                if (!in && tb_port_is_dpin(port))
                        in = port;
                else if (!out && tb_port_is_dpout(port))
                        out = port;
        }

        if (!in) {
                tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
                return;
        }
        if (!out) {
                tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
                return;
        }

        if (tb_switch_alloc_dp_resource(in->sw, in)) {
                tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
                return;
        }

        /* Calculate available bandwidth between in and out */
        available_bw = tb_available_bw(tcm, in, out);
        if (available_bw < 0) {
                tb_warn(tb, "failed to determine available bandwidth\n");
                goto dealloc_dp;
        }

        tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
               available_bw);

        tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
        if (!tunnel) {
                tb_port_dbg(out, "could not allocate DP tunnel\n");
                goto dealloc_dp;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(out, "DP tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                goto dealloc_dp;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return;

dealloc_dp:
        tb_switch_dealloc_dp_resource(in->sw, in);
}

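/*
 * tb_dp_resource_unavailable() - handle loss of a DP adapter
 *
 * Tears down the DP tunnel using @port (if any), drops the port from
 * the dp_resources list and then tries to set up a new tunnel from the
 * remaining resources.
 */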
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
        struct tb_port *in, *out;
        struct tb_tunnel *tunnel;

        if (tb_port_is_dpin(port)) {
                tb_port_dbg(port, "DP IN resource unavailable\n");
                in = port;
                out = NULL;
        } else {
                tb_port_dbg(port, "DP OUT resource unavailable\n");
                in = NULL;
                out = port;
        }

        tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
        tb_deactivate_and_free_tunnel(tunnel);
        list_del_init(&port->list);

        /*
         * See if there is another DP OUT port that can be used to
         * create another tunnel.
         */
        tb_tunnel_dp(tb);
}

static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *p;

        if (tb_port_is_enabled(port))
                return;

        list_for_each_entry(p, &tcm->dp_resources, list) {
                if (p == port)
                        return;
        }

        tb_port_dbg(port, "DP %s resource available\n",
                    tb_port_is_dpin(port) ? "IN" : "OUT");
        list_add_tail(&port->list, &tcm->dp_resources);

        /* Look for suitable DP IN <-> DP OUT pairs now */
        tb_tunnel_dp(tb);
}

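/*
 * tb_tunnel_pci() - establish a PCIe tunnel to @sw
 *
 * Used as the ->approve_switch callback. Finds the PCIe up adapter of
 * @sw and an unused PCIe down adapter on the parent switch, then
 * activates a PCIe tunnel between them.
 */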
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
        struct tb_port *up, *down, *port;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *parent_sw;
        struct tb_tunnel *tunnel;

        up = tb_find_port(sw, TB_TYPE_PCIE_UP);
        if (!up)
                return 0;

        /*
         * Look up available down port. Since we are chaining it should
         * be found right above this switch.
         */
        parent_sw = tb_to_switch(sw->dev.parent);
        port = tb_port_at(tb_route(sw), parent_sw);
        down = tb_find_pcie_down(parent_sw, port);
        if (!down)
                return 0;

        tunnel = tb_tunnel_alloc_pci(tb, up, down);
        if (!tunnel)
                return -ENOMEM;

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(up,
                             "PCIe tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        return 0;
}

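/*
 * tb_approve_xdomain_paths() - enable DMA paths to another domain
 *
 * Used as the ->approve_xdomain_paths callback. Sets up and activates a
 * DMA tunnel between the host NHI adapter and the port leading to the
 * XDomain, using the transmit/receive rings and paths stored in @xd.
 */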
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_port *nhi_port, *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);
        nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);

        mutex_lock(&tb->lock);
        tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
                                     xd->transmit_path, xd->receive_ring,
                                     xd->receive_path);
        if (!tunnel) {
                mutex_unlock(&tb->lock);
                return -ENOMEM;
        }

        if (tb_tunnel_activate(tunnel)) {
                tb_port_info(nhi_port,
                             "DMA tunnel activation failed, aborting\n");
                tb_tunnel_free(tunnel);
                mutex_unlock(&tb->lock);
                return -EIO;
        }

        list_add_tail(&tunnel->list, &tcm->tunnel_list);
        mutex_unlock(&tb->lock);
        return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        struct tb_port *dst_port;
        struct tb_tunnel *tunnel;
        struct tb_switch *sw;

        sw = tb_to_switch(xd->dev.parent);
        dst_port = tb_port_at(xd->route, sw);

        /*
         * It is possible that the tunnel was already torn down (in
         * case of cable disconnect) so it is fine if we cannot find it
         * here anymore.
         */
        tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
        tb_deactivate_and_free_tunnel(tunnel);
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
        if (!xd->is_unplugged) {
                mutex_lock(&tb->lock);
                __tb_disconnect_xdomain_paths(tb, xd);
                mutex_unlock(&tb->lock);
        }
        return 0;
}

/* hotplug handling */

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
        struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
        struct tb *tb = ev->tb;
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_switch *sw;
        struct tb_port *port;

        mutex_lock(&tb->lock);
        if (!tcm->hotplug_active)
                goto out; /* during init, suspend or shutdown */

        sw = tb_switch_find_by_route(tb, ev->route);
        if (!sw) {
                tb_warn(tb,
                        "hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto out;
        }
        if (ev->port > sw->config.max_port_number) {
                tb_warn(tb,
                        "hotplug event from non-existent port %llx:%x (unplug: %d)\n",
                        ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        port = &sw->ports[ev->port];
        if (tb_is_upstream_port(port)) {
                tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
                       ev->route, ev->port, ev->unplug);
                goto put_sw;
        }
        if (ev->unplug) {
                if (tb_port_has_remote(port)) {
                        tb_port_dbg(port, "switch unplugged\n");
                        tb_sw_set_unplugged(port->remote->sw);
                        tb_free_invalid_tunnels(tb);
                        tb_remove_dp_resources(port->remote->sw);
                        tb_switch_lane_bonding_disable(port->remote->sw);
                        tb_switch_remove(port->remote->sw);
                        port->remote = NULL;
                        if (port->dual_link_port)
                                port->dual_link_port->remote = NULL;
                        /* Maybe we can create another DP tunnel */
                        tb_tunnel_dp(tb);
                } else if (port->xdomain) {
                        struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);

                        tb_port_dbg(port, "xdomain unplugged\n");
                        /*
                         * Service drivers are unbound during
                         * tb_xdomain_remove() so setting XDomain as
                         * unplugged here prevents deadlock if they call
                         * tb_xdomain_disable_paths(). We will tear down
                         * the path below.
                         */
                        xd->is_unplugged = true;
                        tb_xdomain_remove(xd);
                        port->xdomain = NULL;
                        __tb_disconnect_xdomain_paths(tb, xd);
                        tb_xdomain_put(xd);
                } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
                        tb_dp_resource_unavailable(tb, port);
                } else {
                        tb_port_dbg(port,
                                    "got unplug event for disconnected port, ignoring\n");
                }
        } else if (port->remote) {
                tb_port_dbg(port, "got plug event for connected port, ignoring\n");
        } else {
                if (tb_port_is_null(port)) {
                        tb_port_dbg(port, "hotplug: scanning\n");
                        tb_scan_port(port);
                        if (!port->remote)
                                tb_port_dbg(port, "hotplug: no switch found\n");
                } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
                        tb_dp_resource_available(tb, port);
                }
        }

put_sw:
        tb_switch_put(sw);
out:
        mutex_unlock(&tb->lock);
        kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
                            const void *buf, size_t size)
{
        const struct cfg_event_pkg *pkg = buf;
        u64 route;

        if (type != TB_CFG_PKG_EVENT) {
                tb_warn(tb, "unexpected event %#x, ignoring\n", type);
                return;
        }

        route = tb_cfg_get_route(&pkg->header);

        if (tb_cfg_error(tb->ctl, route, pkg->port,
                         TB_CFG_ERROR_ACK_PLUG_EVENT)) {
                tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
                        pkg->port);
        }

        tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}

static void tb_stop(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel;
        struct tb_tunnel *n;

        /* tunnels are only present after everything has been initialized */
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
                /*
                 * DMA tunnels require the driver to be functional so we
                 * tear them down. Other protocol tunnels can be left
                 * intact.
                 */
                if (tb_tunnel_is_dma(tunnel))
                        tb_tunnel_deactivate(tunnel);
                tb_tunnel_free(tunnel);
        }
        tb_switch_remove(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_scan_finalize_switch(struct device *dev, void *data)
{
        if (tb_is_switch(dev)) {
                struct tb_switch *sw = tb_to_switch(dev);

                /*
                 * If we found that the switch was already set up by the
                 * boot firmware, mark it as authorized now before we
                 * send uevent to userspace.
                 */
                if (sw->boot)
                        sw->authorized = 1;

                dev_set_uevent_suppress(dev, false);
                kobject_uevent(&dev->kobj, KOBJ_ADD);
                device_for_each_child(dev, NULL, tb_scan_finalize_switch);
        }

        return 0;
}

static int tb_start(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        int ret;

        tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
        if (IS_ERR(tb->root_switch))
                return PTR_ERR(tb->root_switch);

        /*
         * ICM firmware upgrade needs the ICM firmware to be running,
         * which is not the case in native mode, so disable firmware
         * upgrade of the root switch.
         */
        tb->root_switch->no_nvm_upgrade = true;

        ret = tb_switch_configure(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Announce the switch to the world */
        ret = tb_switch_add(tb->root_switch);
        if (ret) {
                tb_switch_put(tb->root_switch);
                return ret;
        }

        /* Full scan to discover devices added before the driver was loaded. */
        tb_scan_switch(tb->root_switch);
        /* Find out tunnels created by the boot firmware */
        tb_discover_tunnels(tb->root_switch);
        /* Add DP IN resources for the root switch */
        tb_add_dp_resources(tb->root_switch);
        /* Make the discovered switches available to userspace */
        device_for_each_child(&tb->root_switch->dev, NULL,
                              tb_scan_finalize_switch);

        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);

        tb_dbg(tb, "suspending...\n");
        tb_switch_suspend(tb->root_switch);
        tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
        tb_dbg(tb, "suspend finished\n");

        return 0;
}

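/*
 * tb_restore_children() - re-enable lane bonding for all connected
 * child switches after resume.
 */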
static void tb_restore_children(struct tb_switch *sw)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                if (!tb_port_has_remote(port))
                        continue;

                if (tb_switch_lane_bonding_enable(port->remote->sw))
                        dev_warn(&sw->dev, "failed to restore lane bonding\n");

                tb_restore_children(port->remote->sw);
        }
}

static int tb_resume_noirq(struct tb *tb)
{
        struct tb_cm *tcm = tb_priv(tb);
        struct tb_tunnel *tunnel, *n;

        tb_dbg(tb, "resuming...\n");

        /* Remove any PCI devices the firmware might have set up */
        tb_switch_reset(tb, 0);

        tb_switch_resume(tb->root_switch);
        tb_free_invalid_tunnels(tb);
        tb_free_unplugged_children(tb->root_switch);
        tb_restore_children(tb->root_switch);
        list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
                tb_tunnel_restart(tunnel);
        if (!list_empty(&tcm->tunnel_list)) {
                /*
                 * The PCIe links need some time to get going.
                 * 100ms works for me...
                 */
                tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
                msleep(100);
        }
        /* Allow tb_handle_hotplug to progress events */
        tcm->hotplug_active = true;
        tb_dbg(tb, "resume finished\n");

        return 0;
}

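/*
 * tb_free_unplugged_xdomains() - remove XDomain connections that did
 * not survive resume
 *
 * Returns the number of XDomains removed so the caller can decide
 * whether a rescan is needed.
 */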
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret = 0;

        tb_switch_for_each_port(sw, port) {
                if (tb_is_upstream_port(port))
                        continue;
                if (port->xdomain && port->xdomain->is_unplugged) {
                        tb_xdomain_remove(port->xdomain);
                        port->xdomain = NULL;
                        ret++;
                } else if (port->remote) {
                        ret += tb_free_unplugged_xdomains(port->remote->sw);
                }
        }

        return ret;
}

static void tb_complete(struct tb *tb)
{
        /*
         * Release any unplugged XDomains; if another domain was swapped
         * in place of an unplugged XDomain we need to run another
         * rescan to pick it up.
         */
        mutex_lock(&tb->lock);
        if (tb_free_unplugged_xdomains(tb->root_switch))
                tb_scan_switch(tb->root_switch);
        mutex_unlock(&tb->lock);
}

static const struct tb_cm_ops tb_cm_ops = {
        .start = tb_start,
        .stop = tb_stop,
        .suspend_noirq = tb_suspend_noirq,
        .resume_noirq = tb_resume_noirq,
        .complete = tb_complete,
        .handle_event = tb_handle_event,
        .approve_switch = tb_tunnel_pci,
        .approve_xdomain_paths = tb_approve_xdomain_paths,
        .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};

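/*
 * tb_probe() - set up the software connection manager for @nhi
 *
 * Allocates a Thunderbolt domain with struct tb_cm as its private data,
 * defaults the security level to "user" and hooks up tb_cm_ops.
 */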
struct tb *tb_probe(struct tb_nhi *nhi)
{
        struct tb_cm *tcm;
        struct tb *tb;

        tb = tb_domain_alloc(nhi, sizeof(*tcm));
        if (!tb)
                return NULL;

        tb->security_level = TB_SECURITY_USER;
        tb->cm_ops = &tb_cm_ops;

        tcm = tb_priv(tb);
        INIT_LIST_HEAD(&tcm->tunnel_list);
        INIT_LIST_HEAD(&tcm->dp_resources);

        return tb;
}