drivers/thunderbolt/xdomain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT                 5000 /* ms */
#define XDOMAIN_UUID_RETRIES                    10
#define XDOMAIN_PROPERTIES_RETRIES              60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES      10
#define XDOMAIN_BONDING_WAIT                    100  /* ms */

struct xdomain_request_work {
        struct work_struct work;
        struct tb_xdp_header *pkg;
        struct tb *tb;
};

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
        UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
                  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

static bool tb_xdomain_match(const struct tb_cfg_request *req,
                             const struct ctl_pkg *pkg)
{
        switch (pkg->frame.eof) {
        case TB_CFG_PKG_ERROR:
                return true;

        case TB_CFG_PKG_XDOMAIN_RESP: {
                const struct tb_xdp_header *res_hdr = pkg->buffer;
                const struct tb_xdp_header *req_hdr = req->request;

                if (pkg->frame.size < req->response_size / 4)
                        return false;

                /* Make sure route matches */
                if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
                     req_hdr->xd_hdr.route_hi)
                        return false;
                if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
                        return false;

                /* Check that the XDomain protocol matches */
                if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
                        return false;

                return true;
        }

        default:
                return false;
        }
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
                            const struct ctl_pkg *pkg)
{
        memcpy(req->response, pkg->buffer, req->response_size);
        req->result.err = 0;
        return true;
}

static void response_ready(void *data)
{
        tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
                                 size_t size, enum tb_cfg_pkg_type type)
{
        struct tb_cfg_request *req;

        req = tb_cfg_request_alloc();
        if (!req)
                return -ENOMEM;

        req->match = tb_xdomain_match;
        req->copy = tb_xdomain_copy;
        req->request = response;
        req->request_size = size;
        req->request_type = type;

        return tb_cfg_request(ctl, req, response_ready, req);
}
/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message to
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
                        size_t size, enum tb_cfg_pkg_type type)
{
        return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
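
/*
 * Usage sketch (illustrative, not part of this file): a service driver
 * answering a request it received over its registered protocol. The
 * struct my_svc_response type is hypothetical; only the
 * tb_xdomain_response() call itself is real API.
 *
 *        struct my_svc_response res = { .status = 0 };
 *
 *        if (tb_xdomain_response(xd, &res, sizeof(res),
 *                                TB_CFG_PKG_XDOMAIN_RESP))
 *                dev_warn(&xd->dev, "failed to send response\n");
 */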

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
        size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
        size_t response_size, enum tb_cfg_pkg_type response_type,
        unsigned int timeout_msec)
{
        struct tb_cfg_request *req;
        struct tb_cfg_result res;

        req = tb_cfg_request_alloc();
        if (!req)
                return -ENOMEM;

        req->match = tb_xdomain_match;
        req->copy = tb_xdomain_copy;
        req->request = request;
        req->request_size = request_size;
        req->request_type = request_type;
        req->response = response;
        req->response_size = response_size;
        req->response_type = response_type;

        res = tb_cfg_request_sync(ctl, req, timeout_msec);

        tb_cfg_request_put(req);

        return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request to
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
        size_t request_size, enum tb_cfg_pkg_type request_type,
        void *response, size_t response_size,
        enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
        return __tb_xdomain_request(xd->tb->ctl, request, request_size,
                                    request_type, response, response_size,
                                    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
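
/*
 * Usage sketch (illustrative, not part of this file): a synchronous
 * request/response round trip from a service driver. The my_req/my_res
 * structures and MY_OPCODE are hypothetical.
 *
 *        struct my_req req = { .opcode = MY_OPCODE };
 *        struct my_res res = {};
 *        int ret;
 *
 *        ret = tb_xdomain_request(xd, &req, sizeof(req),
 *                                 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *                                 TB_CFG_PKG_XDOMAIN_RESP,
 *                                 XDOMAIN_DEFAULT_TIMEOUT);
 *        if (ret)
 *                return ret;
 */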

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
        u8 sequence, enum tb_xdp_type type, size_t size)
{
        u32 length_sn;

        length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
        length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

        hdr->xd_hdr.route_hi = upper_32_bits(route);
        hdr->xd_hdr.route_lo = lower_32_bits(route);
        hdr->xd_hdr.length_sn = length_sn;
        hdr->type = type;
        memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
        const struct tb_xdp_error_response *error;

        if (hdr->type != ERROR_RESPONSE)
                return 0;

        error = (const struct tb_xdp_error_response *)hdr;

        switch (error->error) {
        case ERROR_UNKNOWN_PACKET:
        case ERROR_UNKNOWN_DOMAIN:
                return -EIO;
        case ERROR_NOT_SUPPORTED:
                return -ENOTSUPP;
        case ERROR_NOT_READY:
                return -EAGAIN;
        default:
                break;
        }

        return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
                               uuid_t *uuid)
{
        struct tb_xdp_uuid_response res;
        struct tb_xdp_uuid req;
        int ret;

        memset(&req, 0, sizeof(req));
        tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
                           sizeof(req));

        memset(&res, 0, sizeof(res));
        ret = __tb_xdomain_request(ctl, &req, sizeof(req),
                                   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
                                   TB_CFG_PKG_XDOMAIN_RESP,
                                   XDOMAIN_DEFAULT_TIMEOUT);
        if (ret)
                return ret;

        ret = tb_xdp_handle_error(&res.hdr);
        if (ret)
                return ret;

        uuid_copy(uuid, &res.src_uuid);
        return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
                                const uuid_t *uuid)
{
        struct tb_xdp_uuid_response res;

        memset(&res, 0, sizeof(res));
        tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
                           sizeof(res));

        uuid_copy(&res.src_uuid, uuid);
        res.src_route_hi = upper_32_bits(route);
        res.src_route_lo = lower_32_bits(route);

        return __tb_xdomain_response(ctl, &res, sizeof(res),
                                     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
                                 enum tb_xdp_error error)
{
        struct tb_xdp_error_response res;

        memset(&res, 0, sizeof(res));
        tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
                           sizeof(res));
        res.error = error;

        return __tb_xdomain_response(ctl, &res, sizeof(res),
                                     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
        const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
        u32 **block, u32 *generation)
{
        struct tb_xdp_properties_response *res;
        struct tb_xdp_properties req;
        u16 data_len, len;
        size_t total_size;
        u32 *data = NULL;
        int ret;

        total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
        res = kzalloc(total_size, GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        memset(&req, 0, sizeof(req));
        tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
                           sizeof(req));
        memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
        memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

        len = 0;
        data_len = 0;

        do {
                ret = __tb_xdomain_request(ctl, &req, sizeof(req),
                                           TB_CFG_PKG_XDOMAIN_REQ, res,
                                           total_size, TB_CFG_PKG_XDOMAIN_RESP,
                                           XDOMAIN_DEFAULT_TIMEOUT);
                if (ret)
                        goto err;

                ret = tb_xdp_handle_error(&res->hdr);
                if (ret)
                        goto err;

                /*
                 * The package length includes the whole payload without
                 * the XDomain header. Validate first that the package is
                 * at least the size of the response structure.
                 */
                len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
                if (len < sizeof(*res) / 4) {
                        ret = -EINVAL;
                        goto err;
                }

                len += sizeof(res->hdr.xd_hdr) / 4;
                len -= sizeof(*res) / 4;

                if (res->offset != req.offset) {
                        ret = -EINVAL;
                        goto err;
                }

                /*
                 * On the first pass, allocate a block that has enough
                 * space for the whole properties block.
                 */
                if (!data) {
                        data_len = res->data_length;
                        if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
                                ret = -E2BIG;
                                goto err;
                        }

                        data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
                        if (!data) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }

                memcpy(data + req.offset, res->data, len * 4);
                req.offset += len;
        } while (!data_len || req.offset < data_len);

        *block = data;
        *generation = res->generation;

        kfree(res);

        return data_len;

err:
        kfree(data);
        kfree(res);

        return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
        u64 route, u8 sequence, const uuid_t *src_uuid,
        const struct tb_xdp_properties *req)
{
        struct tb_xdp_properties_response *res;
        size_t total_size;
        u16 len;
        int ret;

        /*
         * Currently we expect all requests to be directed to us. The
         * protocol also supports forwarding, for which we might add
         * support later on.
         */
        if (!uuid_equal(src_uuid, &req->dst_uuid)) {
                tb_xdp_error_response(ctl, route, sequence,
                                      ERROR_UNKNOWN_DOMAIN);
                return 0;
        }

        mutex_lock(&xdomain_lock);

        if (req->offset >= xdomain_property_block_len) {
                mutex_unlock(&xdomain_lock);
                return -EINVAL;
        }

        len = xdomain_property_block_len - req->offset;
        len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
        total_size = sizeof(*res) + len * 4;

        res = kzalloc(total_size, GFP_KERNEL);
        if (!res) {
                mutex_unlock(&xdomain_lock);
                return -ENOMEM;
        }

        tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
                           total_size);
        res->generation = xdomain_property_block_gen;
        res->data_length = xdomain_property_block_len;
        res->offset = req->offset;
        uuid_copy(&res->src_uuid, src_uuid);
        uuid_copy(&res->dst_uuid, &req->src_uuid);
        memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

        mutex_unlock(&xdomain_lock);

        ret = __tb_xdomain_response(ctl, res, total_size,
                                    TB_CFG_PKG_XDOMAIN_RESP);

        kfree(res);
        return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
                                             int retry, const uuid_t *uuid)
{
        struct tb_xdp_properties_changed_response res;
        struct tb_xdp_properties_changed req;
        int ret;

        memset(&req, 0, sizeof(req));
        tb_xdp_fill_header(&req.hdr, route, retry % 4,
                           PROPERTIES_CHANGED_REQUEST, sizeof(req));
        uuid_copy(&req.src_uuid, uuid);

        memset(&res, 0, sizeof(res));
        ret = __tb_xdomain_request(ctl, &req, sizeof(req),
                                   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
                                   TB_CFG_PKG_XDOMAIN_RESP,
                                   XDOMAIN_DEFAULT_TIMEOUT);
        if (ret)
                return ret;

        return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
        struct tb_xdp_properties_changed_response res;

        memset(&res, 0, sizeof(res));
        tb_xdp_fill_header(&res.hdr, route, sequence,
                           PROPERTIES_CHANGED_RESPONSE, sizeof(res));
        return __tb_xdomain_response(ctl, &res, sizeof(res),
                                     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called, the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
        if (!handler->uuid || !handler->callback)
                return -EINVAL;
        if (uuid_equal(handler->uuid, &tb_xdp_uuid))
                return -EINVAL;

        mutex_lock(&xdomain_lock);
        list_add_tail(&handler->list, &protocol_handlers);
        mutex_unlock(&xdomain_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
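
/*
 * Usage sketch (illustrative, not part of this file): hooking into
 * incoming packets of a custom protocol. The my_proto_uuid value and
 * my_proto_cb() handler are hypothetical.
 *
 *        static int my_proto_cb(const void *buf, size_t size, void *data)
 *        {
 *                return 0;
 *        }
 *
 *        static struct tb_protocol_handler my_handler = {
 *                .uuid = &my_proto_uuid,
 *                .callback = my_proto_cb,
 *        };
 *
 *        ret = tb_register_protocol_handler(&my_handler);
 */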

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
        mutex_lock(&xdomain_lock);
        list_del_init(&handler->list);
        mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static int rebuild_property_block(void)
{
        u32 *block, len;
        int ret;

        ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
        if (ret < 0)
                return ret;

        len = ret;

        block = kcalloc(len, sizeof(u32), GFP_KERNEL);
        if (!block)
                return -ENOMEM;

        ret = tb_property_format_dir(xdomain_property_dir, block, len);
        if (ret) {
                kfree(block);
                return ret;
        }

        kfree(xdomain_property_block);
        xdomain_property_block = block;
        xdomain_property_block_len = len;
        xdomain_property_block_gen++;

        return 0;
}

static void finalize_property_block(void)
{
        const struct tb_property *nodename;

        /*
         * On the first XDomain connection we set up the system
         * nodename. This is delayed here because userspace may not have
         * it set when the driver is first probed.
         */
        mutex_lock(&xdomain_lock);
        nodename = tb_property_find(xdomain_property_dir, "deviceid",
                                    TB_PROPERTY_TYPE_TEXT);
        if (!nodename) {
                tb_property_add_text(xdomain_property_dir, "deviceid",
                                     utsname()->nodename);
                rebuild_property_block();
        }
        mutex_unlock(&xdomain_lock);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
        struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
        const struct tb_xdp_header *pkg = xw->pkg;
        const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
        struct tb *tb = xw->tb;
        struct tb_ctl *ctl = tb->ctl;
        const uuid_t *uuid;
        int ret = 0;
        u32 sequence;
        u64 route;

        route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
        sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
        sequence >>= TB_XDOMAIN_SN_SHIFT;

        mutex_lock(&tb->lock);
        if (tb->root_switch)
                uuid = tb->root_switch->uuid;
        else
                uuid = NULL;
        mutex_unlock(&tb->lock);

        if (!uuid) {
                tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
                goto out;
        }

        finalize_property_block();

        switch (pkg->type) {
        case PROPERTIES_REQUEST:
                ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
                        (const struct tb_xdp_properties *)pkg);
                break;

        case PROPERTIES_CHANGED_REQUEST: {
                struct tb_xdomain *xd;

                ret = tb_xdp_properties_changed_response(ctl, route, sequence);

                /*
                 * Since the properties have been changed, let's update
                 * the xdomain related to this connection as well in
                 * case there is a change in services it offers.
                 */
                xd = tb_xdomain_find_by_route_locked(tb, route);
                if (xd) {
                        if (device_is_registered(&xd->dev)) {
                                queue_delayed_work(tb->wq, &xd->get_properties_work,
                                                   msecs_to_jiffies(50));
                        }
                        tb_xdomain_put(xd);
                }

                break;
        }

        case UUID_REQUEST_OLD:
        case UUID_REQUEST:
                ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
                break;

        default:
                tb_xdp_error_response(ctl, route, sequence,
                                      ERROR_NOT_SUPPORTED);
                break;
        }

        if (ret) {
                tb_warn(tb, "failed to send XDomain response for %#x\n",
                        pkg->type);
        }

out:
        kfree(xw->pkg);
        kfree(xw);

        tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
                        size_t size)
{
        struct xdomain_request_work *xw;

        xw = kmalloc(sizeof(*xw), GFP_KERNEL);
        if (!xw)
                return false;

        INIT_WORK(&xw->work, tb_xdp_handle_request);
        xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
        if (!xw->pkg) {
                kfree(xw);
                return false;
        }
        xw->tb = tb_domain_get(tb);

        schedule_work(&xw->work);
        return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers a new service driver @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
        drv->driver.bus = &tb_bus_type;
        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
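
/*
 * Usage sketch (illustrative, not part of this file): registering a
 * service driver, typically from module init. The id table entry and
 * my_probe()/my_remove() callbacks are hypothetical; TB_SERVICE() and
 * struct tb_service_driver come from <linux/thunderbolt.h>.
 *
 *        static const struct tb_service_id my_ids[] = {
 *                { TB_SERVICE("network", 1) },
 *                { },
 *        };
 *        MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *        static struct tb_service_driver my_driver = {
 *                .driver.name = "my-service",
 *                .probe = my_probe,
 *                .remove = my_remove,
 *                .id_table = my_ids,
 *        };
 *
 *        ret = tb_register_service_driver(&my_driver);
 */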

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters the XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        /*
         * It should be null terminated but anything else is pretty much
         * allowed.
         */
        return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
        return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
                        svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        /* Full buffer size except new line and null termination */
        get_modalias(svc, buf, PAGE_SIZE - 2);
        return sprintf(buf, "%s\n", buf);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
        &dev_attr_key.attr,
        &dev_attr_modalias.attr,
        &dev_attr_prtcid.attr,
        &dev_attr_prtcvers.attr,
        &dev_attr_prtcrevs.attr,
        &dev_attr_prtcstns.attr,
        NULL,
};

static struct attribute_group tb_service_attr_group = {
        .attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
        &tb_service_attr_group,
        NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);
        char modalias[64];

        get_modalias(svc, modalias, sizeof(modalias));
        return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);
        struct tb_xdomain *xd = tb_service_parent(svc);

        tb_service_debugfs_remove(svc);
        ida_simple_remove(&xd->service_ids, svc->id);
        kfree(svc->key);
        kfree(svc);
}

struct device_type tb_service_type = {
        .name = "thunderbolt_service",
        .groups = tb_service_attr_groups,
        .uevent = tb_service_uevent,
        .release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
        struct tb_xdomain *xd = data;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return 0;

        if (!tb_property_find(xd->properties, svc->key,
                              TB_PROPERTY_TYPE_DIRECTORY))
                device_unregister(dev);

        return 0;
}

static int find_service(struct device *dev, void *data)
{
        const struct tb_property *p = data;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return 0;

        return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
                            struct tb_property *property)
{
        struct tb_property_dir *dir = property->value.dir;
        struct tb_property *p;

        /* Fill in standard properties */
        p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcid = p->value.immediate;
        p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcvers = p->value.immediate;
        p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcrevs = p->value.immediate;
        p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcstns = p->value.immediate;

        svc->key = kstrdup(property->key, GFP_KERNEL);
        if (!svc->key)
                return -ENOMEM;

        return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
        struct tb_service *svc;
        struct tb_property *p;
        struct device *dev;
        int id;

        /*
         * First remove all services that are not available anymore in
         * the updated property block.
         */
        device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

        /* Then re-enumerate properties creating new services as we go */
        tb_property_for_each(xd->properties, p) {
                if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
                        continue;

                /* If the service exists already we are fine */
                dev = device_find_child(&xd->dev, p, find_service);
                if (dev) {
                        put_device(dev);
                        continue;
                }

                svc = kzalloc(sizeof(*svc), GFP_KERNEL);
                if (!svc)
                        break;

                if (populate_service(svc, p)) {
                        kfree(svc);
                        break;
                }

                id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
                if (id < 0) {
                        kfree(svc->key);
                        kfree(svc);
                        break;
                }
                svc->id = id;
                svc->dev.bus = &tb_bus_type;
                svc->dev.type = &tb_service_type;
                svc->dev.parent = &xd->dev;
                dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

                tb_service_debugfs_init(svc);

                if (device_register(&svc->dev)) {
                        put_device(&svc->dev);
                        break;
                }
        }
}

static int populate_properties(struct tb_xdomain *xd,
                               struct tb_property_dir *dir)
{
        const struct tb_property *p;

        /* Required properties */
        p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
        if (!p)
                return -EINVAL;
        xd->device = p->value.immediate;

        p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
        if (!p)
                return -EINVAL;
        xd->vendor = p->value.immediate;

        kfree(xd->device_name);
        xd->device_name = NULL;
        kfree(xd->vendor_name);
        xd->vendor_name = NULL;

        /* Optional properties */
        p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
        if (p)
                xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
        p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
        if (p)
                xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

        return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
        if (!xd->resume)
                return;

        xd->resume = false;
        if (xd->transmit_path) {
                dev_dbg(&xd->dev, "re-establishing DMA path\n");
                tb_domain_approve_xdomain_paths(xd->tb, xd);
        }
}

static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
        return tb_to_switch(xd->dev.parent);
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
        bool change = false;
        struct tb_port *port;
        int ret;

        port = tb_port_at(xd->route, tb_xdomain_parent(xd));

        ret = tb_port_get_link_speed(port);
        if (ret < 0)
                return ret;

        if (xd->link_speed != ret)
                change = true;

        xd->link_speed = ret;

        ret = tb_port_get_link_width(port);
        if (ret < 0)
                return ret;

        if (xd->link_width != ret)
                change = true;

        xd->link_width = ret;

        if (change)
                kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

        return 0;
}

static void tb_xdomain_get_uuid(struct work_struct *work)
{
        struct tb_xdomain *xd = container_of(work, typeof(*xd),
                                             get_uuid_work.work);
        struct tb *tb = xd->tb;
        uuid_t uuid;
        int ret;

        ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
        if (ret < 0) {
                if (xd->uuid_retries-- > 0) {
                        queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
                                           msecs_to_jiffies(100));
                } else {
                        dev_dbg(&xd->dev, "failed to read remote UUID\n");
                }
                return;
        }

        if (uuid_equal(&uuid, xd->local_uuid))
                dev_dbg(&xd->dev, "intra-domain loop detected\n");

        /*
         * If the UUID is different, there is another domain connected
         * so mark this one unplugged and wait for the connection
         * manager to replace it.
         */
        if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
                dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
                xd->is_unplugged = true;
                return;
        }

        /* First time fill in the missing UUID */
        if (!xd->remote_uuid) {
                xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
                if (!xd->remote_uuid)
                        return;
        }

        /* Now we can start the normal properties exchange */
        queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
                           msecs_to_jiffies(100));
        queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
                           msecs_to_jiffies(1000));
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
        struct tb_xdomain *xd = container_of(work, typeof(*xd),
                                             get_properties_work.work);
        struct tb_property_dir *dir;
        struct tb *tb = xd->tb;
        bool update = false;
        u32 *block = NULL;
        u32 gen = 0;
        int ret;

        ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
                                        xd->remote_uuid, xd->properties_retries,
                                        &block, &gen);
        if (ret < 0) {
                if (xd->properties_retries-- > 0) {
                        queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
                                           msecs_to_jiffies(1000));
                } else {
                        /* Give up now */
                        dev_err(&xd->dev,
                                "failed to read XDomain properties from %pUb\n",
                                xd->remote_uuid);
                }
                return;
        }

        xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

        mutex_lock(&xd->lock);

        /* Only accept newer generation properties */
        if (xd->properties && gen <= xd->property_block_gen) {
                /*
                 * On resume it is likely that the properties block is
                 * not changed (unless the other end added or removed
                 * services). However, we need to make sure the existing
                 * DMA paths are restored properly.
                 */
                tb_xdomain_restore_paths(xd);
                goto err_free_block;
        }

        dir = tb_property_parse_dir(block, ret);
        if (!dir) {
                dev_err(&xd->dev, "failed to parse XDomain properties\n");
                goto err_free_block;
        }

        ret = populate_properties(xd, dir);
        if (ret) {
                dev_err(&xd->dev, "missing XDomain properties in response\n");
                goto err_free_dir;
        }

        /* Release the existing one */
        if (xd->properties) {
                tb_property_free_dir(xd->properties);
                update = true;
        }

        xd->properties = dir;
        xd->property_block_gen = gen;

        tb_xdomain_update_link_attributes(xd);

        tb_xdomain_restore_paths(xd);

        mutex_unlock(&xd->lock);

        kfree(block);

        /*
         * Now the device should be ready enough so we can add it to the
         * bus and let userspace know about it. If the device is already
         * registered, we notify the userspace that it has changed.
         */
        if (!update) {
                if (device_add(&xd->dev)) {
                        dev_err(&xd->dev, "failed to add XDomain device\n");
                        return;
                }
        } else {
                kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
        }

        enumerate_services(xd);
        return;

err_free_dir:
        tb_property_free_dir(dir);
err_free_block:
        kfree(block);
        mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
        struct tb_xdomain *xd = container_of(work, typeof(*xd),
                                             properties_changed_work.work);
        int ret;

        ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
                                xd->properties_changed_retries, xd->local_uuid);
        if (ret) {
                if (xd->properties_changed_retries-- > 0)
                        queue_delayed_work(xd->tb->wq,
                                           &xd->properties_changed_work,
                                           msecs_to_jiffies(1000));
                return;
        }

        xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
        int ret;

        if (mutex_lock_interruptible(&xd->lock))
                return -ERESTARTSYS;
        ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
        mutex_unlock(&xd->lock);

        return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
        int ret;

        if (mutex_lock_interruptible(&xd->lock))
                return -ERESTARTSYS;
        ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
        mutex_unlock(&xd->lock);

        return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
}

static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%u\n", xd->link_width);
}

static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static struct attribute *xdomain_attrs[] = {
        &dev_attr_device.attr,
        &dev_attr_device_name.attr,
        &dev_attr_rx_lanes.attr,
        &dev_attr_rx_speed.attr,
        &dev_attr_tx_lanes.attr,
        &dev_attr_tx_speed.attr,
        &dev_attr_unique_id.attr,
        &dev_attr_vendor.attr,
        &dev_attr_vendor_name.attr,
        NULL,
};

static struct attribute_group xdomain_attr_group = {
        .attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
        &xdomain_attr_group,
        NULL,
};

static void tb_xdomain_release(struct device *dev)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        put_device(xd->dev.parent);

        tb_property_free_dir(xd->properties);
        ida_destroy(&xd->service_ids);

        kfree(xd->local_uuid);
        kfree(xd->remote_uuid);
        kfree(xd->device_name);
        kfree(xd->vendor_name);
        kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
        xd->uuid_retries = XDOMAIN_UUID_RETRIES;
        xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
        xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

        if (xd->needs_uuid) {
                queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
                                   msecs_to_jiffies(100));
        } else {
                /* Start exchanging properties with the other host */
                queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
                                   msecs_to_jiffies(100));
                queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
                                   msecs_to_jiffies(1000));
        }
}

static void stop_handshake(struct tb_xdomain *xd)
{
        xd->uuid_retries = 0;
        xd->properties_retries = 0;
        xd->properties_changed_retries = 0;

        cancel_delayed_work_sync(&xd->get_uuid_work);
        cancel_delayed_work_sync(&xd->get_properties_work);
        cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
        stop_handshake(tb_to_xdomain(dev));
        return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
        struct tb_xdomain *xd = tb_to_xdomain(dev);

        /*
         * Ask tb_xdomain_get_properties() to restore any existing DMA
         * paths after the properties are re-read.
         */
        xd->resume = true;
        start_handshake(xd);

        return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
        .name = "thunderbolt_xdomain",
        .release = tb_xdomain_release,
        .pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *          the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
                                    u64 route, const uuid_t *local_uuid,
                                    const uuid_t *remote_uuid)
{
        struct tb_switch *parent_sw = tb_to_switch(parent);
        struct tb_xdomain *xd;
        struct tb_port *down;

        /* Make sure the downstream domain is accessible */
        down = tb_port_at(route, parent_sw);
        tb_port_unlock(down);

        xd = kzalloc(sizeof(*xd), GFP_KERNEL);
        if (!xd)
                return NULL;

        xd->tb = tb;
        xd->route = route;
        ida_init(&xd->service_ids);
        mutex_init(&xd->lock);
        INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
        INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
        INIT_DELAYED_WORK(&xd->properties_changed_work,
                          tb_xdomain_properties_changed);

        xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
        if (!xd->local_uuid)
                goto err_free;

        if (remote_uuid) {
                xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
                                          GFP_KERNEL);
                if (!xd->remote_uuid)
                        goto err_free_local_uuid;
        } else {
                xd->needs_uuid = true;
        }

        device_initialize(&xd->dev);
        xd->dev.parent = get_device(parent);
        xd->dev.bus = &tb_bus_type;
        xd->dev.type = &tb_xdomain_type;
        xd->dev.groups = xdomain_attr_groups;
        dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

        /*
         * This keeps the DMA powered on as long as we have an active
         * connection to another host.
         */
        pm_runtime_set_active(&xd->dev);
        pm_runtime_get_noresume(&xd->dev);
        pm_runtime_enable(&xd->dev);

        return xd;

err_free_local_uuid:
        kfree(xd->local_uuid);
err_free:
        kfree(xd);

        return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts the XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
        /* Start exchanging properties with the other host */
        start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
        device_unregister(dev);
        return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
        stop_handshake(xd);

        device_for_each_child_reverse(&xd->dev, xd, unregister_service);

        /*
         * Undo runtime PM here explicitly because it is possible that
         * the XDomain was never added to the bus and thus device_del()
         * is not called for it (device_del() would handle this otherwise).
         */
        pm_runtime_disable(&xd->dev);
        pm_runtime_put_noidle(&xd->dev);
        pm_runtime_set_suspended(&xd->dev);

        if (!device_is_registered(&xd->dev))
                put_device(&xd->dev);
        else
                device_unregister(&xd->dev);
}

/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. This function tries
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
        struct tb_port *port;
        int ret;

        port = tb_port_at(xd->route, tb_xdomain_parent(xd));
        if (!port->dual_link_port)
                return -ENODEV;

        ret = tb_port_enable(port->dual_link_port);
        if (ret)
                return ret;

        ret = tb_wait_for_port(port->dual_link_port, true);
        if (ret < 0)
                return ret;
        if (!ret)
                return -ENOTCONN;

        ret = tb_port_lane_bonding_enable(port);
        if (ret) {
                tb_port_warn(port, "failed to enable lane bonding\n");
                return ret;
        }

        tb_xdomain_update_link_attributes(xd);

        dev_dbg(&xd->dev, "lane bonding enabled\n");
        return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
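
/*
 * Usage sketch (illustrative, not part of this file): a connection
 * manager can attempt bonding and simply continue on a single lane if
 * the other end does not support it:
 *
 *        if (tb_xdomain_lane_bonding_enable(xd))
 *                dev_dbg(&xd->dev, "continuing on a single lane\n");
 */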

/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. If bonding has been
 * enabled, this function can be used to disable it.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
        struct tb_port *port;

        port = tb_port_at(xd->route, tb_xdomain_parent(xd));
        if (port->dual_link_port) {
                tb_port_lane_bonding_disable(port);
                tb_port_disable(port->dual_link_port);
                tb_xdomain_update_link_attributes(xd);

                dev_dbg(&xd->dev, "lane bonding disabled\n");
        }
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *                 send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *                receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
                            u16 transmit_ring, u16 receive_path,
                            u16 receive_ring)
{
        int ret;

        mutex_lock(&xd->lock);

        if (xd->transmit_path) {
                ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
                goto exit_unlock;
        }

        xd->transmit_path = transmit_path;
        xd->transmit_ring = transmit_ring;
        xd->receive_path = receive_path;
        xd->receive_ring = receive_ring;

        ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
        mutex_unlock(&xd->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
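
/*
 * Usage sketch (illustrative, not part of this file): a service driver
 * such as a network driver enabling its DMA paths once its rings are
 * allocated. The my_* values are hypothetical.
 *
 *        ret = tb_xdomain_enable_paths(xd, my_transmit_path,
 *                                      my_tx_ring->hop, my_receive_path,
 *                                      my_rx_ring->hop);
 *        if (ret)
 *                return ret;
 */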

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
1568 int tb_xdomain_disable_paths(struct tb_xdomain *xd)
1569 {
1570         int ret = 0;
1571
1572         mutex_lock(&xd->lock);
1573         if (xd->transmit_path) {
1574                 xd->transmit_path = 0;
1575                 xd->transmit_ring = 0;
1576                 xd->receive_path = 0;
1577                 xd->receive_ring = 0;
1578
1579                 ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
1580         }
1581         mutex_unlock(&xd->lock);
1582
1583         return ret;
1584 }
1585 EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

struct tb_xdomain_lookup {
        const uuid_t *uuid;
        u8 link;
        u8 depth;
        u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
        const struct tb_xdomain_lookup *lookup)
{
        struct tb_port *port;

        tb_switch_for_each_port(sw, port) {
                struct tb_xdomain *xd;

                if (port->xdomain) {
                        xd = port->xdomain;

                        if (lookup->uuid) {
                                if (xd->remote_uuid &&
                                    uuid_equal(xd->remote_uuid, lookup->uuid))
                                        return xd;
                        } else if (lookup->link &&
                                   lookup->link == xd->link &&
                                   lookup->depth == xd->depth) {
                                return xd;
                        } else if (lookup->route &&
                                   lookup->route == xd->route) {
                                return xd;
                        }
                } else if (tb_port_has_remote(port)) {
                        xd = switch_find_xdomain(port->remote->sw, lookup);
                        if (xd)
                                return xd;
                }
        }

        return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.uuid = uuid;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
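
/*
 * Example (illustrative): look up a peer while holding the domain lock
 * and drop the reference when done. remote_uuid is a hypothetical UUID
 * obtained from an earlier exchange:
 *
 *      mutex_lock(&tb->lock);
 *      xd = tb_xdomain_find_by_uuid(tb, remote_uuid);
 *      mutex_unlock(&tb->lock);
 *      if (xd) {
 *              // ... use the XDomain ...
 *              tb_xdomain_put(xd);
 *      }
 */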

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
                                                 u8 depth)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.link = link;
        lookup.depth = depth;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}
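
/*
 * Example (illustrative): a connection manager can resolve an XDomain
 * from the position reported in a firmware event. ev is a hypothetical
 * event structure carrying link and depth fields:
 *
 *      xd = tb_xdomain_find_by_link_depth(tb, ev->link, ev->depth);
 *      if (xd) {
 *              // ... handle the event ...
 *              tb_xdomain_put(xd);
 *      }
 */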

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.route = route;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
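
/*
 * Example (illustrative): resolve the XDomain behind a route string,
 * for instance one carried in a notification packet. route is a
 * hypothetical value parsed from such a packet:
 *
 *      mutex_lock(&tb->lock);
 *      xd = tb_xdomain_find_by_route(tb, route);
 *      mutex_unlock(&tb->lock);
 *      if (xd) {
 *              // ... use the XDomain ...
 *              tb_xdomain_put(xd);
 *      }
 */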

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        const struct tb_protocol_handler *handler, *tmp;
        const struct tb_xdp_header *hdr = buf;
        unsigned int length;
        int ret = 0;

        /* We expect the packet to be at least the size of the header */
        length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
        if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
                return true;
        if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
                return true;

        /*
         * Handle XDomain discovery protocol packets directly here. For
         * other protocols (based on their UUID) we call registered
         * handlers in turn.
         */
        if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
                if (type == TB_CFG_PKG_XDOMAIN_REQ)
                        return tb_xdp_schedule_request(tb, hdr, size);
                return false;
        }

        mutex_lock(&xdomain_lock);
        list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
                if (!uuid_equal(&hdr->uuid, handler->uuid))
                        continue;

                mutex_unlock(&xdomain_lock);
                ret = handler->callback(buf, size, handler->data);
                mutex_lock(&xdomain_lock);

                if (ret)
                        break;
        }
        mutex_unlock(&xdomain_lock);

        return ret > 0;
}
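
/*
 * Example (illustrative): a driver implementing its own XDomain
 * protocol registers a handler whose callback is invoked above when
 * the packet UUID matches. my_proto_uuid, my_proto_cb and my_handler
 * are hypothetical names for this sketch:
 *
 *      static int my_proto_cb(const void *buf, size_t size, void *data)
 *      {
 *              // ... parse the packet; return > 0 once it is consumed
 *              return 1;
 *      }
 *
 *      static struct tb_protocol_handler my_handler = {
 *              .uuid = &my_proto_uuid,
 *              .callback = my_proto_cb,
 *      };
 *
 *      ret = tb_register_protocol_handler(&my_handler);
 */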

static int update_xdomain(struct device *dev, void *data)
{
        struct tb_xdomain *xd;

        xd = tb_to_xdomain(dev);
        if (xd) {
                queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
                                   msecs_to_jiffies(50));
        }

        return 0;
}

static void update_all_xdomains(void)
{
        bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
        struct tb_property *p;

        p = tb_property_find(xdomain_property_dir, key,
                             TB_PROPERTY_TYPE_DIRECTORY);
        if (p && p->value.dir == dir) {
                tb_property_remove(p);
                return true;
        }
        return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property
 * directory to the host's available properties. The other connected
 * hosts are notified so they can re-read the properties of this host
 * if they are interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
        int ret;

        if (WARN_ON(!xdomain_property_dir))
                return -EAGAIN;

        if (!key || strlen(key) > 8)
                return -EINVAL;

        mutex_lock(&xdomain_lock);
        if (tb_property_find(xdomain_property_dir, key,
                             TB_PROPERTY_TYPE_DIRECTORY)) {
                ret = -EEXIST;
                goto err_unlock;
        }

        ret = tb_property_add_dir(xdomain_property_dir, key, dir);
        if (ret)
                goto err_unlock;

        ret = rebuild_property_block();
        if (ret) {
                remove_directory(key, dir);
                goto err_unlock;
        }

        mutex_unlock(&xdomain_lock);
        update_all_xdomains();
        return 0;

err_unlock:
        mutex_unlock(&xdomain_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
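
/*
 * Example (illustrative): a service driver exposing its own property
 * directory. The "mysvc" key and the "prtcid" property are made-up
 * names for this sketch (keys are limited to 8 characters):
 *
 *      struct tb_property_dir *dir;
 *      int ret;
 *
 *      dir = tb_property_create_dir(NULL);
 *      if (!dir)
 *              return -ENOMEM;
 *      tb_property_add_immediate(dir, "prtcid", 1);
 *
 *      ret = tb_register_property_dir("mysvc", dir);
 *      if (ret)
 *              tb_property_free_dir(dir);
 */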

/**
 * tb_unregister_property_dir() - Remove property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify
 * the connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
        int ret = 0;

        mutex_lock(&xdomain_lock);
        if (remove_directory(key, dir))
                ret = rebuild_property_block();
        mutex_unlock(&xdomain_lock);

        if (!ret)
                update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
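
/*
 * Example (illustrative): undo the registration from the previous
 * sketch when the service driver is unloaded:
 *
 *      tb_unregister_property_dir("mysvc", dir);
 *      tb_property_free_dir(dir);
 */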

int tb_xdomain_init(void)
{
        xdomain_property_dir = tb_property_create_dir(NULL);
        if (!xdomain_property_dir)
                return -ENOMEM;

        /*
         * Initialize the standard set of properties without any service
         * directories. Those will be added by the service drivers
         * themselves when they are loaded.
         *
         * We also add the node name later when the first connection is
         * made.
         */
        tb_property_add_immediate(xdomain_property_dir, "vendorid",
                                  PCI_VENDOR_ID_INTEL);
        tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
        tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
        tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

        return 0;
}

void tb_xdomain_exit(void)
{
        kfree(xdomain_property_block);
        tb_property_free_dir(xdomain_property_dir);
}